├── .gitignore
├── .gitmodules
├── LICENSE
├── NOTES.md
├── README.md
├── Vagrantfile
├── ansible.cfg
├── attacks
│   └── ssl_proxy.attack.j2
├── bin
│   ├── libvirt-inventory.py
│   ├── printring
│   └── redo
├── docs
│   ├── LICENSE
│   ├── TODO.md
│   └── ansible-command-examples.txt
├── group_vars
│   └── all.example
├── host_vars
│   ├── swift-storage-01
│   ├── swift-storage-02
│   └── swift-storage-03
├── hosts
├── library
│   └── swift-ansible-modules
│       └── keystone_user
├── playbooks
│   ├── remove_disks.yml
│   ├── remove_keystone.yml
│   └── remove_rings.yml
├── roles
│   ├── authentication
│   │   ├── files
│   │   │   └── redhat_logging.conf
│   │   ├── handlers
│   │   │   ├── debian.yml
│   │   │   ├── main.yml
│   │   │   └── redhat.yml
│   │   ├── tasks
│   │   │   ├── common.yml
│   │   │   ├── debian.yml
│   │   │   ├── debian_test.yml
│   │   │   ├── main.yml
│   │   │   ├── redhat.yml
│   │   │   └── redhat_test.yml
│   │   ├── templates
│   │   │   └── keystone.conf.j2
│   │   └── vars
│   │       └── main.yml
│   ├── common
│   │   ├── files
│   │   │   └── CentOS-Base.repo
│   │   ├── handlers
│   │   │   ├── debian.yml
│   │   │   ├── main.yml
│   │   │   └── redhat.yml
│   │   ├── tasks
│   │   │   ├── debian.yml
│   │   │   ├── debian_ntp.yml
│   │   │   ├── debian_openstack_repository.yml
│   │   │   ├── debian_package_cache.yml
│   │   │   ├── main.yml
│   │   │   ├── redhat.yml
│   │   │   ├── redhat_ntp.yml
│   │   │   ├── redhat_openstack_repository.yml
│   │   │   └── redhat_package_cache.yml
│   │   ├── templates
│   │   │   ├── 01proxy.j2
│   │   │   └── timezone.j2
│   │   └── vars
│   │       └── main.yml
│   ├── lbssl
│   │   ├── handlers
│   │   │   └── main.yml
│   │   ├── tasks
│   │   │   ├── common.yml
│   │   │   ├── common_pound_service.yml
│   │   │   ├── common_pound_ssl.yml
│   │   │   ├── debian.yml
│   │   │   ├── main.yml
│   │   │   └── redhat.yml
│   │   ├── templates
│   │   │   ├── default_pound.j2
│   │   │   ├── redhat_pound.cfg.j2
│   │   │   └── ubuntu_pound.cfg.j2
│   │   └── vars
│   │       └── main.yml
│   ├── package_cache
│   │   ├── handlers
│   │   │   ├── debian.yml
│   │   │   ├── main.yml
│   │   │   └── redhat.yml
│   │   └── tasks
│   │       ├── debian.yml
│   │       ├── debian_configure.yml
│   │       ├── debian_install.yml
│   │       ├── debian_service.yml
│   │       ├── main.yml
│   │       ├── redhat.yml
│   │       ├── redhat_configure.yml
│   │       ├── redhat_install.yml
│   │       └── redhat_service.yml
│   ├── proxy
│   │   ├── handlers
│   │   │   └── main.yml
│   │   ├── tasks
│   │   │   ├── common.yml
│   │   │   ├── debian.yml
│   │   │   ├── debian_memcached_config.yml
│   │   │   ├── main.yml
│   │   │   ├── redhat.yml
│   │   │   └── redhat_memcached_config.yml
│   │   ├── templates
│   │   │   ├── object-expirer.conf.j2
│   │   │   └── proxy-server.conf.j2
│   │   └── vars
│   │       └── main.yml
│   ├── storage
│   │   ├── files
│   │   │   └── redhat_rsync.init
│   │   ├── tasks
│   │   │   ├── common.yml
│   │   │   ├── common_build_rings.yml
│   │   │   ├── debian.yml
│   │   │   ├── main.yml
│   │   │   └── redhat.yml
│   │   ├── templates
│   │   │   ├── account-replication.conf.j2
│   │   │   ├── account-server.conf.j2
│   │   │   ├── container-replication.conf.j2
│   │   │   ├── container-server.conf.j2
│   │   │   ├── object-replication.conf.j2
│   │   │   ├── object-server.conf.j2
│   │   │   └── rsyncd.conf.j2
│   │   └── vars
│   │       └── main.yml
│   ├── swift_common
│   │   ├── tasks
│   │   │   ├── common.yml
│   │   │   ├── debian.yml
│   │   │   ├── main.yml
│   │   │   └── redhat.yml
│   │   ├── templates
│   │   │   └── swift.conf.j2
│   │   └── vars
│   │       └── main.yml
│   └── swiftclient
│       ├── tasks
│       │   └── main.yml
│       └── templates
│           ├── adminrc.j2
│           └── testrc.j2
├── site.yml
├── start_proxy.yml
├── start_storage.yml
└── tests
    ├── gauntlt.yml
    ├── swiftclient.yml
    └── tests.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | credentials
2 | .vagrant
3 | group_vars/all
4 | group_vars/all.old
5 | *.swp
6 | .DS_store
7 | tmp
8 | fetch
9 | library/test-modules
10 | library/openstack
11 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccollicutt/swiftacular/c44df81893ae85154949701bb9d00252c03dff1f/.gitmodules
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013, curtis@serverascode.com
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/NOTES.md:
--------------------------------------------------------------------------------
1 | # NOTES
2 |
3 | ## Vagrant boxes used
4 |
5 | * RedHat
6 |
7 | ```bash
8 | curtis$ wget http://developer.nrel.gov/downloads/vagrant-boxes/CentOS-6.5-x86_64-v20140311.box
9 | ```
10 |
11 | * Ubuntu - Image provided by Vagrant
12 |
13 | ## Miscellaneous commands
14 |
15 | * Limit a playbook run to one group (`pb` is a shell alias for `ansible-playbook`):
16 |
17 | ```bash
18 | curtis$ pb -l package_cache site.yml
19 | ```
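20 | 
21 | * Using the swift client against the self-signed SSL endpoints (a rough sketch of the two options mentioned in the README):
22 | 
23 | ```bash
24 | curtis$ swift --insecure list
25 | curtis$ # or export it once per shell and drop the flag
26 | curtis$ export SWIFTCLIENT_INSECURE=true
27 | curtis$ swift list
28 | ```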
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ```
2 | _________ .__ _____ __ .__
3 | / _____/_ _ _|__|/ ____\/ |______ ____ __ __| | _____ _______
4 | \_____ \\ \/ \/ / \ __\\ __\__ \ _/ ___\| | \ | \__ \\_ __ \
5 | / \\ /| || | | | / __ \\ \___| | / |__/ __ \| | \/
6 | /_______ / \/\_/ |__||__| |__| (____ /\___ >____/|____(____ /__|
7 | \/ \/ \/ \/
8 | ```
9 |
10 | # OpenStack Swift and Ansible
11 |
12 | This repository will create a virtualized OpenStack Swift cluster using Vagrant, VirtualBox, and Ansible.
13 |
14 | #### Table of Contents
15 |
16 | 1. [Too long; didn't read](#tldr)
17 | 2. [Features](#features)
18 | 3. [Requirements](#requirements)
19 | 4. [Networking setup](#networking-setup)
20 | 5. [Starting over](#starting-over)
21 | 6. [Development environment](#development-environment)
22 | 7. [Modules](#modules)
23 | 8. [Future work](#future-work)
24 | 9. [Issues](#issues)
25 | 10. [Notes](#notes)
26 |
27 | ## tl;dr
28 |
29 | *Note this will start seven virtual machines on your computer.*
30 |
31 | ```bash
32 | $ git clone git@github.com:curtisgithub/swiftacular.git
33 | $ cd swiftacular
34 | # Checkout some modules to help with managing openstack
35 | $ git clone https://github.com/openstack-ansible/openstack-ansible-modules library/openstack
36 | $ vagrant up
37 | $ cp group_vars/all.example group_vars/all # and edit if desired
38 | $ ansible-playbook site.yml
39 | ```
40 |
41 | ## Supported Operating Systems and OpenStack Releases
42 |
43 | * CentOS 6.5 with OpenStack Havana packages
44 | * Ubuntu 12.04 with OpenStack Havana packages
45 | * Ubuntu 14.04 with OpenStack Icehouse packages
46 |
47 | Ubuntu 14.04 is probably the most tested version right now, followed by Ubuntu 12.04 and then RedHat/CentOS 6.5+.
48 |
49 | The Vagrantfile has the above boxes in place with Ubuntu 12.04 being the default uncommented box. To use one of the other operating systems as the basis for Swiftacular, simply uncomment the box for the OS you would like to use in the Vagrantfile, and make sure the other boxes are commented out.
50 |
51 | ## Features
52 |
53 | * Run OpenStack Swift in vms on your local computer, but with multiple servers
54 | * Replication network is used, which means this could be a basis for a geo-replication system
55 | * SSL - Keystone is configured to use SSL and the Swift Proxy is proxied by an SSL server
56 | * Sparse files to back Swift disks
57 | * Tests for uploading files into Swift
58 | * Use of [gauntlt](http://gauntlt.org/) attacks to verify installation
59 | * Supports Ubuntu Precise 12.04, Trusty 14.04 and CentOS 6.5
60 |
61 | ## Requirements
62 |
63 | * Vagrant and Virtualbox
64 | * For Ubuntu I am using the official Vagrant Precise64 images
65 | * For CentOS 6 I am using the [Vagrant box](http://puppet-vagrant-boxes.puppetlabs.com/centos-65-x64-virtualbox-nocm.box) provided by Puppet Labs
66 | * Enough resources on your computer to run seven vms
67 |
68 | ## Virtual machines created
69 |
70 | Seven Vagrant-based virtual machines are used for this playbook:
71 |
72 | * __package_cache__ - One apt-cacher-ng server so that packages only have to be downloaded from the Internet once
73 | * __authentication__ - One Keystone server for authentication
74 | * __lbssl__ - One SSL termination server that will be used to proxy connections to the Swift Proxy server
75 | * __swift-proxy__ - One Swift proxy server
76 | * __swift-storage__ - Three Swift storage nodes
77 |
78 | ## Networking setup
79 |
80 | Each vm will have four networks (technically five including the Vagrant network). In a real production system not every server would need to be attached to every network, and in fact you would want to avoid that, but in this case they are all attached to every network.
81 |
82 | * __eth0__ - Used by Vagrant
83 | * __eth1__ - 192.168.100.0/24 - The "public" network that users would connect to
84 | * __eth2__ - 10.0.10.0/24 - This is the network between the SSL terminator and the Swift Proxy
85 | * __eth3__ - 10.0.20.0/24 - The local Swift internal network
86 | * __eth4__ - 10.0.30.0/24 - The replication network which is a feature of OpenStack Swift starting with the Havana release
87 |
88 | ## Self-signed certificates
89 |
90 | Because this playbook configures self-signed SSL certificates, the swift client will by default complain about that fact; either the --insecure option needs to be used or, alternatively, the SWIFTCLIENT_INSECURE environment variable can be set to true.
91 |
92 | ## Using the swift command line client
93 |
94 | You can install the swift client anywhere that you have access to the SSL termination point and Keystone. So you could put it on your local laptop as well, probably with:
95 |
96 | ```bash
97 | $ pip install python-swiftclient
98 | ```
99 |
100 | However, I usually log in to the package_cache server and use swift from there.
101 |
102 | ```bash
103 | $ vagrant ssh swift-package-cache-01
104 | vagrant@swift-package-cache-01:~$ . testrc
105 | vagrant@swift-package-cache-01:~$ swift list
106 | vagrant@swift-package-cache-01:~$ echo "swift is cool" > swift.txt
107 | vagrant@swift-package-cache-01:~$ swift upload swifty swift.txt
108 | swift.txt
109 | vagrant@swift-package-cache-01:~$ swift list
110 | swifty
111 | vagrant@swift-package-cache-01:~$ swift list swifty
112 | swift.txt
113 | ```
114 |
115 | ## Starting over
116 |
117 | If you want to redo the installation there are a few ways.
118 |
119 | To restart completely:
120 |
121 | ```bash
122 | $ vagrant destroy -f
123 | $ vagrant up
124 | # wait...
125 | $ ansible-playbook site.yml
126 | ```
127 |
128 | There is a script to destroy and rebuild everything but the package cache:
129 |
130 | ```bash
131 | $ ./bin/redo
132 | $ ansible -m ping all # just to check if networking is up
133 | $ ansible-playbook site.yml
134 | ```
135 |
136 | To remove and redo only the rings and fake/sparse disks without destroying any virtual machines:
137 |
138 | ```bash
139 | $ ansible-playbook playbooks/remove_rings.yml
140 | $ ansible-playbook site.yml
141 | ```
142 |
143 | To remove the keystone database and redo the endpoints, users, regions, etc:
144 |
145 | ```bash
146 | $ ansible-playbook ./playbooks/remove_keystone.yml
147 | $ ansible-playbook site.yml
148 | ```
149 |
150 | ## Development environment
151 |
152 | This playbook was developed in the following environment:
153 |
154 | * OSX 10.8.2
155 | * Ansible 1.4
156 | * Virtualbox 4.2.6
157 | * Vagrant 1.3.5
158 |
159 | ## Modules
160 |
161 | There is a swift-ansible-modules directory in the library directory that contains a couple of modules taken from the official Ansible modules as well as from the [openstack-ansible-modules](https://github.com/lorin/openstack-ansible) repository; for now both have been modified to allow the "insecure" option, which permits self-signed certificates. I hope to get those changes into their respective repositories soon.
162 |
163 | ## Future work
164 |
165 | See the [issues](https://github.com/curtisgithub/swiftacular/issues) in the tracking system on Github for Swiftacular with the enhancement label.
166 |
167 | ## Issues
168 |
169 | See the [issues](https://github.com/curtisgithub/swiftacular/issues) in the tracking system on Github for Swiftacular.
170 |
171 | ## Notes
172 |
173 | * I know that Vagrant can automatically start Ansible playbooks on the creation of a vm, but I prefer to run the playbook manually
174 | * LXC is likely a better fit than Virtualbox given all the vms are the same OS and we don't need to boot any vms within vms inception style
175 | * Starting the vms is a bit slow I believe because of the extra networks
176 |
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | nodes = {
5 | 'swift-package-cache' => [1, 20],
6 | 'swift-keystone' => [1, 50],
7 | 'swift-lbssl' => [1, 30],
8 | 'swift-proxy' => [1, 100],
9 | 'swift-storage' => [3, 200],
10 | }
11 |
12 | Vagrant.configure("2") do |config|
13 | #config.vm.box = "trusty64"
14 | #config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box"
15 | #config.vm.box = "centos65"
16 | #config.vm.box_url = "http://puppet-vagrant-boxes.puppetlabs.com/centos-65-x64-virtualbox-nocm.box"
17 | config.vm.box = "precise64"
18 | config.vm.box_url = "http://files.vagrantup.com/precise64.box"
19 |
20 | nodes.each do |prefix, (count, ip_start)|
21 | count.times do |i|
22 | hostname = "%s-%02d" % [prefix, (i+1)]
23 |
24 | config.vm.provider :virtualbox do |v|
25 | v.customize ["modifyvm", :id, "--memory", 1024]
26 | end
27 |
28 | config.vm.define "#{hostname}" do |box|
29 | puts "working on #{hostname} with ip of 192.168.100.#{ip_start+i}"
30 |
31 | box.vm.hostname = "#{hostname}.example.com"
32 |
33 | #
34 | # Networks
35 | #
36 |
37 | # Public
38 | box.vm.network :private_network, :ip => "192.168.100.#{ip_start+i}", :netmask => "255.255.255.0"
39 |
40 | # SSL and loadbalancing
41 | box.vm.network :private_network, :ip => "10.0.10.#{ip_start+i}", :netmask => "255.255.255.0"
42 |
43 | # Internal
44 | box.vm.network :private_network, :ip => "10.0.20.#{ip_start+i}", :netmask => "255.255.255.0"
45 |
46 | # Replication
47 | box.vm.network :private_network, :ip => "10.0.30.#{ip_start+i}", :netmask => "255.255.255.0"
48 |
49 | end
50 | end
51 | end
52 | end
53 |
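54 | # To rebuild a single node without destroying the rest of the cluster, something
55 | # like the following should work from the host (see also bin/redo):
56 | #
57 | #   vagrant destroy -f swift-storage-01 && vagrant up swift-storage-01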
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | sudo=True
3 | remote_user=vagrant
4 | hostfile=hosts
5 | host_key_checking=False
6 | private_key_file=~/.vagrant.d/insecure_private_key
7 |
--------------------------------------------------------------------------------
/attacks/ssl_proxy.attack.j2:
--------------------------------------------------------------------------------
1 | @slow
2 |
3 | Feature: nmap attacks swift installation
4 | Background:
5 | Given "nmap" is installed
6 | And the following profile:
7 | | name | value |
8 | | hostname | {{ swift_proxy_ssl_proxy_server }} |
9 |
10 | Scenario: Verify the SSL termination server is open on expected set of ports using the nmap fast flag
11 | When I launch an "nmap" attack with:
12 | """
13 | nmap -F
14 | """
15 | Then the output should match:
16 | """
17 | 22/tcp\s+open
18 | """
19 |
20 | Then the output should match:
21 | """
22 | 443/tcp\s+open
23 | """
24 |
25 | Then the output should not contain:
26 | """
27 | 80/tcp
28 | """
--------------------------------------------------------------------------------
/bin/libvirt-inventory.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | #from BeautifulSoup import BeautifulSoup
4 |
5 | from xml.dom import minidom
6 | import libvirt
7 | import sys
8 | import json
9 |
10 | #
11 | # Want to create ansible_ssh_host entries for each server.
12 | # _meta is a high level entry in the json output, same level
13 | # as the server groups.
14 | #
15 | def addMeta(vmName, inventory, group):
16 |
17 | if not inventory['_meta']:
18 | inventory['_meta'] = {}
19 | inventory['_meta']['hostvars'] = {}
20 |
21 |     if vmName not in inventory['_meta']['hostvars']:
22 | inventory['_meta']['hostvars'][vmName] = {}
23 | inventory['_meta']['hostvars'][vmName]['ansible_ssh_host'] = ip
24 |
25 | return True
26 |
27 | #
28 | # Connect to the hypervisor
29 | #
30 | conn = libvirt.open("qemu:///system")
31 | if conn == None:
32 | print 'Failed to open connection to hypervisor'
33 | sys.exit(1)
34 |
35 | #
36 | # Create all the groups in the inventory
37 | #
38 | groups = ['authentication', 'lbssl', 'swiftclient', 'package_cache', 'proxy', 'storage']
39 | inventory = {}
40 | inventory['_meta'] = {}
41 | for group in groups:
42 | if not group in inventory:
43 | inventory[group] = {
44 | 'hosts' : [],
45 | }
46 |
47 | #
48 | # Find all active vms and add them into the inventory by finding
49 | # their IP from the default.leases file
50 | #
51 | for vm in conn.listAllDomains():
52 |
53 | if vm.isActive():
54 | xmlDoc = minidom.parseString(vm.XMLDesc())
55 | interfaces = xmlDoc.getElementsByTagName('mac')
56 |
57 | mac = interfaces[0].getAttribute('address')
58 |
59 | # Open leases and search for the mac address
60 | leases = '/var/lib/libvirt/dnsmasq/default.leases'
61 | with open(leases, 'r') as fh:
62 | for line in fh.readlines():
63 | col = line.split()
64 | if col[1] == mac:
65 | ip = col[2]
66 | break
67 |
68 | # ugh
69 | if 'keystone' in vm.name():
70 | inventory['authentication']['hosts'].append(vm.name())
71 | addMeta(vm.name(), inventory, 'authentication')
72 | elif 'storage' in vm.name():
73 | inventory['storage']['hosts'].append(vm.name())
74 | addMeta(vm.name(), inventory, 'storage')
75 | elif 'package-cache' in vm.name():
76 | # Using the package cache server as the swiftclient as well
77 | inventory['package_cache']['hosts'].append(vm.name())
78 | inventory['swiftclient']['hosts'].append(vm.name())
79 | addMeta(vm.name(), inventory, 'package_cache')
80 | addMeta(vm.name(), inventory, 'swiftclient')
81 | elif 'proxy' in vm.name():
82 | inventory['proxy']['hosts'].append(vm.name())
83 | addMeta(vm.name(), inventory, 'proxy')
84 | elif 'lbssl' in vm.name():
85 | inventory['lbssl']['hosts'].append(vm.name())
86 | addMeta(vm.name(), inventory, 'lbssl')
87 |
88 | print json.dumps(inventory, indent=4)
89 |
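90 | # Example usage as a dynamic inventory (a sketch; this assumes the libvirt guests
91 | # follow the swift-* naming used by this repo and that the default network leases
92 | # file lives at the path hardcoded above):
93 | #
94 | #   ansible-playbook -i bin/libvirt-inventory.py site.yml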
--------------------------------------------------------------------------------
/bin/printring:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ansible -m shell -a "swift-ring-builder /etc/swift/account.builder" swift-proxy-01
4 | ansible -m shell -a "swift-ring-builder /etc/swift/container.builder" swift-proxy-01
5 | ansible -m shell -a "swift-ring-builder /etc/swift/object.builder" swift-proxy-01
6 |
--------------------------------------------------------------------------------
/bin/redo:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | for s in keystone-01 proxy-01 storage-01 storage-02 storage-03 lbssl-01; do
4 | echo "Deleting swift-$s"
5 | vagrant destroy -f swift-$s
6 | sleep 1
7 | done
8 |
9 | sleep 2
10 |
11 | vagrant up
12 |
--------------------------------------------------------------------------------
/docs/LICENSE:
--------------------------------------------------------------------------------
1 | This program is free software: you can redistribute it and/or modify
2 | it under the terms of the GNU General Public License as published by
3 | the Free Software Foundation, either version 3 of the License, or
4 | (at your option) any later version.
5 |
6 | This program is distributed in the hope that it will be useful,
7 | but WITHOUT ANY WARRANTY; without even the implied warranty of
8 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 | GNU General Public License for more details.
10 |
11 | You should have received a copy of the GNU General Public License
12 | along with this program. If not, see <http://www.gnu.org/licenses/>.
--------------------------------------------------------------------------------
/docs/TODO.md:
--------------------------------------------------------------------------------
1 | # TODO
2 | * add an rsync test
3 | * add an upstart job to remount disk images
4 | * Is swiftoperator the right role for users?
5 | * setup tags?
6 | * use upstarts to start with services instead of swift-init all
7 | * Demo tenant is not the correct one to create?
8 | * work on notifications to restart services?
9 | * check ssl
10 | * shouldn't restart services on runs after the first one unless required
11 | * create example all.example
12 | * Fix tasks that always return changed, ie. pound startup config
13 | * Will rings be built 3 times in storage main.yml when they only need to be built once?
14 | * See what's happening here: https://github.com/lorin/openstack-ansible/tree/master/services/swift
15 | * Add an ansible config file instead of the ansiblerc?
16 | * Submit insecure option back to upstream for ansible swift modules
17 | * write/read affinity for swift -- though we only have one region
18 | * increase rsync connections
19 | * are all services actually running on the storage nodes? ie. does swift-init all start actually complete? ansible seems to think so.
20 | * EC2 compatibility verify, ssl, etc
21 | * automatically generate passwords and tokens on each run in group_vars/all?
22 | * have vagrant create the ansible_hosts file?
23 | * iptables templates for each server
24 | * write a sparse_file module
25 | * Use gather facts for info from other groups instead of delegate_to?
26 | * object-expirer...where should this run?
27 | * make pound -> swift-proxy address a variable in pound.cfg, ie. right now it is hardcoded to 10.0.10.100
28 | * add object expirer? "no object-expirer running"
29 |
--------------------------------------------------------------------------------
/docs/ansible-command-examples.txt:
--------------------------------------------------------------------------------
1 | Running a command across all servers:
2 |
3 | ans -m shell -a "ifconfig | grep 'inet addr'" all
4 | ans -a "ping -c 1 -w 1 news.google.com" all
5 | ans -m shell -a "ping -c 1 -w 1 10.0.10.1 > /dev/null" all
6 |
7 | limit to running on lbssl and package_cache:
8 |
9 | pb site.yml --limit lbssl,package_cache,common
10 |
11 | list objects on storage servers:
12 |
13 | ans -m shell -a "ls /srv/node/*/objects" storage
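14 | 
15 | check whether the swift services are up on the storage nodes (a rough example,
16 | using the same ans alias as above):
17 | 
18 | ans -m shell -a "swift-init all status" storage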
--------------------------------------------------------------------------------
/group_vars/all.example:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Server IPs - probably don't change this :)
5 | #
6 |
7 | keystone_server: "{{ hostvars[groups['authentication'][0]]['ansible_eth1']['ipv4']['address'] }}"
8 | swift_proxy_server: "{{ hostvars[groups['proxy'][0]]['ansible_eth1']['ipv4']['address'] }}"
9 | swift_proxy_ssl_proxy_server: "{{ hostvars[groups['lbssl'][0]]['ansible_eth1']['ipv4']['address'] }}"
10 | package_cache_server: "{{ hostvars[groups['package_cache'][0]]['ansible_eth1']['ipv4']['address'] }}"
11 |
12 | #
13 | # Timezone and NTP
14 | #
15 |
16 | timezone_area: America
17 | timezone_city: Edmonton
18 | time_server: ntp.ubuntu.com
19 |
20 | #
21 | # Disks and replica settings
22 | #
23 |
24 | disk_prefix: td
25 | partition_power: 12
26 | replicas: 2
27 | min_part_hours: 1
28 | # Size of the sparse image to be mounted *in gigabytes*
29 | loop_disk_size: 500
30 |
31 | #
32 | # Swift hash settings
33 | #
34 |
35 | swift_hash_path_suffix: CHANGEME
36 | swift_hash_path_prefix: CHANGEME
37 |
38 | #
39 | # Keystone region
40 | #
41 |
42 | keystone_region: Edmonton
43 |
44 | #
45 | # Keystone roles, tokens and passwords
46 | #
47 |
48 | keystone_mysql_password: CHANGEME
49 | keystone_admin_token: CHANGEME
50 | keystone_admin_role: admin
51 | keystone_admin_user: admin
52 | keystone_admin_tenant: admin
53 | keystone_admin_user_password: CHANGEME
54 | keystone_generic_service_password: CHANGEME
55 |
56 | #
57 | # Configure a test user to run tests with
58 | #
59 |
60 | keystone_test_user: achilles
61 | keystone_test_user_password: CHANGEME
62 |
--------------------------------------------------------------------------------
/host_vars/swift-storage-01:
--------------------------------------------------------------------------------
1 | zone: 1
2 | region: 1
3 | disks: 3
--------------------------------------------------------------------------------
/host_vars/swift-storage-02:
--------------------------------------------------------------------------------
1 | zone: 2
2 | region: 1
3 | disks: 3
--------------------------------------------------------------------------------
/host_vars/swift-storage-03:
--------------------------------------------------------------------------------
1 | zone: 3
2 | region: 1
3 | disks: 3
--------------------------------------------------------------------------------
/hosts:
--------------------------------------------------------------------------------
1 |
2 | # Keystone
3 | [authentication]
4 | swift-keystone-01 ansible_ssh_host=192.168.100.50
5 |
6 | # Loadbalancing and SSL
7 | [lbssl]
8 | swift-lbssl-01 ansible_ssh_host=192.168.100.30
9 |
10 | # A place to run swift cli tests
11 | [swiftclient]
12 | swift-package-cache-01 ansible_ssh_host=192.168.100.20
13 |
14 | # Gauntlt is for running security checks
15 | # See: http://gauntlt.org/
16 | [gauntlt]
17 | swift-package-cache-01 ansible_ssh_host=192.168.100.20
18 |
19 | # apt-cacher-ng
20 | [package_cache]
21 | swift-package-cache-01 ansible_ssh_host=192.168.100.20
22 |
23 | # Swift proxy node
24 | [proxy]
25 | swift-proxy-01 ansible_ssh_host=192.168.100.100
26 |
27 | # Swift storage node
28 | [storage]
29 | swift-storage-01 ansible_ssh_host=192.168.100.200
30 | swift-storage-02 ansible_ssh_host=192.168.100.201
31 | swift-storage-03 ansible_ssh_host=192.168.100.202
32 |
--------------------------------------------------------------------------------
/library/swift-ansible-modules/keystone_user:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 |
4 | # Based on Jimmy Tang's implementation
5 |
6 | DOCUMENTATION = '''
7 | ---
8 | module: keystone_user
9 | short_description: Manage OpenStack Identity (keystone) users, tenants and roles
10 | description:
11 | - Manage users,tenants, roles from OpenStack.
12 | options:
13 | login_user:
14 | description:
15 | - login username to authenticate to keystone
16 | required: false
17 | default: admin
18 | login_password:
19 | description:
20 | - Password of login user
21 | required: false
22 | default: 'yes'
23 | login_tenant_name:
24 | description:
25 | - The tenant login_user belongs to
26 | required: false
27 | default: None
28 | token:
29 | description:
30 | - The token to be uses in case the password is not specified
31 | required: false
32 | default: None
33 | endpoint:
34 | description:
35 | - The keystone url for authentication
36 | required: false
37 | default: 'http://127.0.0.1:35357/v2.0/'
38 | user:
39 | description:
40 | - The name of the user that has to added/removed from OpenStack
41 | required: false
42 | default: None
43 | password:
44 | description:
45 | - The password to be assigned to the user
46 | required: false
47 | default: None
48 | tenant:
49 | description:
50 | - The tenant name that has be added/removed
51 | required: false
52 | default: None
53 | description:
54 | description:
55 | - A description for the tenant
56 | required: false
57 | default: None
58 | email:
59 | description:
60 | - An email address for the user
61 | required: false
62 | default: None
63 | role:
64 | description:
65 | - The name of the role to be assigned or created
66 | required: false
67 | default: None
68 | state:
69 | description:
70 | - Indicate desired state of the resource
71 | choices: ['present', 'absent']
72 | default: present
73 | requirements: [ python-keystoneclient ]
74 | author: Lorin Hochstein
75 | '''
76 |
77 | EXAMPLES = '''
78 | # Create a tenant
79 | - keystone_user: tenant=demo tenant_description="Default Tenant"
80 |
81 | # Create a user
82 | - keystone_user: user=john tenant=demo password=secrete
83 |
84 | # Apply the admin role to the john user in the demo tenant
85 | - keystone_user: role=admin user=john tenant=demo
86 | '''
87 |
88 | try:
89 | from keystoneclient.v2_0 import client
90 | except ImportError:
91 | keystoneclient_found = False
92 | else:
93 | keystoneclient_found = True
94 |
95 |
96 | def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
97 | """Return a keystone client object"""
98 |
99 | if token:
100 | return client.Client(endpoint=endpoint, token=token, insecure=True)
101 | else:
102 | return client.Client(auth_url=endpoint, username=login_user,
103 | password=login_password, tenant_name=login_tenant_name,
104 | insecure=True)
105 |
106 |
107 | def tenant_exists(keystone, tenant):
108 | """ Return True if tenant already exists"""
109 | return tenant in [x.name for x in keystone.tenants.list()]
110 |
111 |
112 | def user_exists(keystone, user):
113 | """" Return True if user already exists"""
114 | return user in [x.name for x in keystone.users.list()]
115 |
116 |
117 | def get_tenant(keystone, name):
118 | """ Retrieve a tenant by name"""
119 | tenants = [x for x in keystone.tenants.list() if x.name == name]
120 | count = len(tenants)
121 | if count == 0:
122 | raise KeyError("No keystone tenants with name %s" % name)
123 | elif count > 1:
124 | raise ValueError("%d tenants with name %s" % (count, name))
125 | else:
126 | return tenants[0]
127 |
128 |
129 | def get_user(keystone, name):
130 | """ Retrieve a user by name"""
131 | users = [x for x in keystone.users.list() if x.name == name]
132 | count = len(users)
133 | if count == 0:
134 | raise KeyError("No keystone users with name %s" % name)
135 | elif count > 1:
136 | raise ValueError("%d users with name %s" % (count, name))
137 | else:
138 | return users[0]
139 |
140 |
141 | def get_role(keystone, name):
142 | """ Retrieve a role by name"""
143 | roles = [x for x in keystone.roles.list() if x.name == name]
144 | count = len(roles)
145 | if count == 0:
146 | raise KeyError("No keystone roles with name %s" % name)
147 | elif count > 1:
148 | raise ValueError("%d roles with name %s" % (count, name))
149 | else:
150 | return roles[0]
151 |
152 |
153 | def get_tenant_id(keystone, name):
154 | return get_tenant(keystone, name).id
155 |
156 |
157 | def get_user_id(keystone, name):
158 | return get_user(keystone, name).id
159 |
160 |
161 | def ensure_tenant_exists(keystone, tenant_name, tenant_description,
162 | check_mode):
163 | """ Ensure that a tenant exists.
164 |
165 | Return (True, id) if a new tenant was created, (False, None) if it
166 | already existed.
167 | """
168 |
169 | # Check if tenant already exists
170 | try:
171 | tenant = get_tenant(keystone, tenant_name)
172 | except KeyError:
173 | # Tenant doesn't exist yet
174 | pass
175 | else:
176 | if tenant.description == tenant_description:
177 | return (False, tenant.id)
178 | else:
179 | # We need to update the tenant description
180 | if check_mode:
181 | return (True, tenant.id)
182 | else:
183 | tenant.update(description=tenant_description)
184 | return (True, tenant.id)
185 |
186 | # We now know we will have to create a new tenant
187 | if check_mode:
188 | return (True, None)
189 |
190 | ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
191 | description=tenant_description,
192 | enabled=True)
193 | return (True, ks_tenant.id)
194 |
195 |
196 | def ensure_tenant_absent(keystone, tenant, check_mode):
197 | """ Ensure that a tenant does not exist
198 |
199 | Return True if the tenant was removed, False if it didn't exist
200 | in the first place
201 | """
202 | if not tenant_exists(keystone, tenant):
203 | return False
204 |
205 | # We now know we will have to delete the tenant
206 | if check_mode:
207 | return True
208 |     keystone.tenants.delete(get_tenant(keystone, tenant))
209 |     return True
210 | def ensure_user_exists(keystone, user_name, password, email, tenant_name,
211 | check_mode):
212 | """ Check if user exists
213 |
214 |     Return (True, id) if a new user was created, (False, id) if the user already
215 | exists
216 | """
217 |
218 |     # Check if user already exists
219 | try:
220 | user = get_user(keystone, user_name)
221 | except KeyError:
222 |         # User doesn't exist yet
223 | pass
224 | else:
225 | # User does exist, we're done
226 | return (False, user.id)
227 |
228 | # We now know we will have to create a new user
229 | if check_mode:
230 | return (True, None)
231 |
232 | tenant = get_tenant(keystone, tenant_name)
233 |
234 | user = keystone.users.create(name=user_name, password=password,
235 | email=email, tenant_id=tenant.id)
236 | return (True, user.id)
237 |
238 |
239 | def ensure_role_exists(keystone, user_name, tenant_name, role_name,
240 | check_mode):
241 | """ Check if role exists
242 |
243 | Return (True, id) if a new role was created or if the role was newly
244 | assigned to the user for the tenant. (False, id) if the role already
245 |     exists and was already assigned to the user for the tenant.
246 |
247 | """
248 | # Check if the user has the role in the tenant
249 | user = get_user(keystone, user_name)
250 | tenant = get_tenant(keystone, tenant_name)
251 | roles = [x for x in keystone.roles.roles_for_user(user, tenant)
252 | if x.name == role_name]
253 | count = len(roles)
254 |
255 | if count == 1:
256 | # If the role is in there, we are done
257 | role = roles[0]
258 | return (False, role.id)
259 | elif count > 1:
260 | # Too many roles with the same name, throw an error
261 | raise ValueError("%d roles with name %s" % (count, role_name))
262 |
263 | # At this point, we know we will need to make changes
264 | if check_mode:
265 | return (True, None)
266 |
267 | # Get the role if it exists
268 | try:
269 | role = get_role(keystone, role_name)
270 | except KeyError:
271 | # Role doesn't exist yet
272 | role = keystone.roles.create(role_name)
273 |
274 |     # Associate the role with the user for the tenant
275 | keystone.roles.add_user_role(user, role, tenant)
276 | return (True, role.id)
277 |
278 |
279 | def ensure_user_absent(keystone, user, check_mode):
280 | raise NotImplementedError("Not yet implemented")
281 |
282 |
283 | def ensure_role_absent(keystone, user, tenant, role, check_mode):
284 | raise NotImplementedError("Not yet implemented")
285 |
286 |
287 | def main():
288 |
289 | module = AnsibleModule(
290 | argument_spec=dict(
291 | user=dict(required=False),
292 | password=dict(required=False),
293 | tenant=dict(required=False),
294 | tenant_description=dict(required=False),
295 | email=dict(required=False),
296 | role=dict(required=False),
297 | state=dict(default='present', choices=['present', 'absent']),
298 | endpoint=dict(required=False,
299 | default="http://127.0.0.1:35357/v2.0"),
300 | token=dict(required=False),
301 | login_user=dict(required=False),
302 | login_password=dict(required=False),
303 | login_tenant_name=dict(required=False)
304 | ),
305 | supports_check_mode=True,
306 | mutually_exclusive=[['token', 'login_user'],
307 | ['token', 'login_password'],
308 | ['token', 'login_tenant_name']]
309 | )
310 |
311 | if not keystoneclient_found:
312 | module.fail_json(msg="the python-keystoneclient module is required")
313 |
314 | user = module.params['user']
315 | password = module.params['password']
316 | tenant = module.params['tenant']
317 | tenant_description = module.params['tenant_description']
318 | email = module.params['email']
319 | role = module.params['role']
320 | state = module.params['state']
321 | endpoint = module.params['endpoint']
322 | token = module.params['token']
323 | login_user = module.params['login_user']
324 | login_password = module.params['login_password']
325 | login_tenant_name = module.params['login_tenant_name']
326 |
327 | keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
328 |
329 | check_mode = module.check_mode
330 |
331 | try:
332 | d = dispatch(keystone, user, password, tenant, tenant_description,
333 | email, role, state, endpoint, token, login_user,
334 | login_password, check_mode)
335 | except Exception as e:
336 | if check_mode:
337 | # If we have a failure in check mode
338 | module.exit_json(changed=True,
339 | msg="exception: %s" % e.message)
340 | else:
341 | module.fail_json(msg=e.message)
342 | else:
343 | module.exit_json(**d)
344 |
345 |
346 | def dispatch(keystone, user=None, password=None, tenant=None,
347 | tenant_description=None, email=None, role=None,
348 | state="present", endpoint=None, token=None, login_user=None,
349 | login_password=None, check_mode=False):
350 | """ Dispatch to the appropriate method.
351 |
352 | Returns a dict that will be passed to exit_json
353 |
354 | tenant user role state
355 | ------ ---- ---- --------
356 | X present ensure_tenant_exists
357 | X absent ensure_tenant_absent
358 | X X present ensure_user_exists
359 | X X absent ensure_user_absent
360 | X X X present ensure_role_exists
361 | X X X absent ensure_role_absent
362 |
363 |
364 | """
365 | changed = False
366 | id = None
367 | if tenant and not user and not role and state == "present":
368 | changed, id = ensure_tenant_exists(keystone, tenant,
369 | tenant_description, check_mode)
370 | elif tenant and not user and not role and state == "absent":
371 | changed = ensure_tenant_absent(keystone, tenant, check_mode)
372 | elif tenant and user and not role and state == "present":
373 | changed, id = ensure_user_exists(keystone, user, password,
374 | email, tenant, check_mode)
375 | elif tenant and user and not role and state == "absent":
376 | changed = ensure_user_absent(keystone, user, check_mode)
377 | elif tenant and user and role and state == "present":
378 | changed, id = ensure_role_exists(keystone, user, tenant, role,
379 | check_mode)
380 | elif tenant and user and role and state == "absent":
381 | changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
382 | else:
383 | # Should never reach here
384 | raise ValueError("Code should never reach here")
385 |
386 | return dict(changed=changed, id=id)
387 |
388 | # this is magic, see lib/ansible/module_common.py
389 | #<>
390 | if __name__ == '__main__':
391 | main()
392 |
--------------------------------------------------------------------------------
/playbooks/remove_disks.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: storage
4 |
5 | tasks:
6 |
7 | - name: cleanup image mounts
8 | shell: umount /srv/node/{{ disk_prefix }}{{ item }}; losetup -d /dev/loop{{ item }}; rm -f /var/tmp/{{ disk_prefix }}{{ item }}.img
9 | with_sequence: count=3
10 |
--------------------------------------------------------------------------------
/playbooks/remove_keystone.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: authentication
4 |
5 | tasks:
6 |
7 |     - name: remove keystone mysql database
8 | mysql_db: name=keystone state=absent
9 |
--------------------------------------------------------------------------------
/playbooks/remove_rings.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: storage
4 |
5 | tasks:
6 |
7 | - name: stop all swift-storage node services
8 | command: swift-init all stop
9 | ignore_errors: True
10 |
11 | - name: cleanup image mounts
12 | shell: umount /srv/node/{{ disk_prefix }}{{ item }}; losetup -d /dev/loop{{ item }}; rm -f /var/tmp/{{ disk_prefix }}{{ item }}.img
13 | with_sequence: count=3
14 |
15 | - hosts: proxy
16 |
17 | tasks:
18 |
19 | - name: stop swift-proxy on proxy nodes
20 | service: name=openstack-swift-proxy state=stopped
21 | when: ansible_os_family == 'RedHat'
22 |
23 | - name: stop swift-proxy on proxy nodes
24 | service: name=swift-proxy state=stopped
25 | when: ansible_os_family == 'Debian'
26 |
27 | - name: remove builder files
28 | command: rm -f /etc/swift/{{item}}.builder
29 | with_items:
30 | - account
31 | - container
32 | - object
33 |
34 | - hosts:
35 | - storage
36 | - proxy
37 |
38 | tasks:
39 |
40 | - name: remove ring files
41 | command: rm -f /etc/swift/{{item}}.ring.gz
42 | with_items:
43 | - account
44 | - container
45 | - object
46 |
--------------------------------------------------------------------------------
/roles/authentication/files/redhat_logging.conf:
--------------------------------------------------------------------------------
1 | [loggers]
2 | keys=root,access
3 |
4 | [handlers]
5 | keys=production,file,access_file,devel
6 |
7 | [formatters]
8 | keys=minimal,normal,debug
9 |
10 |
11 | ###########
12 | # Loggers #
13 | ###########
14 |
15 | [logger_root]
16 | level=WARNING
17 | handlers=file
18 |
19 | [logger_access]
20 | level=INFO
21 | qualname=access
22 | handlers=access_file
23 |
24 |
25 | ################
26 | # Log Handlers #
27 | ################
28 |
29 | [handler_production]
30 | class=handlers.SysLogHandler
31 | level=ERROR
32 | formatter=normal
33 | args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
34 |
35 | [handler_file]
36 | class=handlers.WatchedFileHandler
37 | level=WARNING
38 | formatter=normal
39 | args=('/var/log/keystone/error.log',)
40 |
41 | [handler_access_file]
42 | class=handlers.WatchedFileHandler
43 | level=INFO
44 | formatter=minimal
45 | args=('/var/log/keystone/access.log',)
46 |
47 | [handler_devel]
48 | class=StreamHandler
49 | level=NOTSET
50 | formatter=debug
51 | args=(sys.stdout,)
52 |
53 |
54 | ##################
55 | # Log Formatters #
56 | ##################
57 |
58 | [formatter_minimal]
59 | format=%(message)s
60 |
61 | [formatter_normal]
62 | format=(%(name)s): %(asctime)s %(levelname)s %(message)s
63 |
64 | [formatter_debug]
65 | format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
--------------------------------------------------------------------------------
/roles/authentication/handlers/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart mysql
4 | service: name=mysql state=restarted
5 |
6 | - name: restart keystone
7 | service: name=keystone state=restarted
--------------------------------------------------------------------------------
/roles/authentication/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Handlers for the keystone authentication server
5 | #
6 |
7 | #
8 | # Debian
9 | #
10 |
11 | - include: debian.yml
12 | when: ansible_os_family == 'Debian'
13 |
14 | #
15 | # Redhat
16 | #
17 |
18 | - include: redhat.yml
19 | when: ansible_os_family == 'RedHat'
--------------------------------------------------------------------------------
/roles/authentication/handlers/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart mysql
4 | service: name=mysqld state=restarted
5 |
6 | - name: restart keystone
7 | service: name=openstack-keystone state=restarted
--------------------------------------------------------------------------------
/roles/authentication/tasks/common.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Keystone operations common to redhat and debian
5 | #
6 |
7 | - name: copy over keystone.conf from template
8 | template: src=keystone.conf.j2 dest=/etc/keystone/keystone.conf owner=root group=root mode=0644
9 | notify:
10 | - restart keystone
11 |
12 | #
13 | # On Trusty installing the keystone package automatically generates /etc/keystone/ssl and populates it
14 | # with various keys that I don't want. Below we see if the key file we want exists, and if not we
15 | # assume that /etc/keystone/ssl was created by the package installation, so remove it.
16 | #
17 |
18 | - name: check if there is a /etc/keystone/ssl/certs/keystone.pem file
19 | command: ls /etc/keystone/ssl/certs/keystone.pem
20 | register: keystone_pem_missing
21 | ignore_errors: true
22 | changed_when: keystone_pem_missing.rc > 0
23 |
24 | - name: remove /etc/keystone/ssl if there is not a keystone.pem file
25 | file: path=/etc/keystone/ssl state=absent
26 | when: keystone_pem_missing.rc > 0
27 | changed_when: keystone_pem_missing.rc > 0
28 |
29 | - name: use keystone ssl_setup to create ssl configuration files and keys
30 | shell: rm -rf /etc/keystone/ssl; keystone-manage ssl_setup --keystone-user keystone --keystone-group keystone
31 | creates=/etc/keystone/ssl/certs/keystone.pem
32 | when: keystone_pem_missing.rc > 0
33 | notify:
34 | - restart keystone
35 |
36 | - name: create keystone database
37 | mysql_db: name=keystone state=present
38 | register: keystone
39 |
40 | - name: create keystone mysql user
41 | mysql_user: name=keystone password={{ keystone_mysql_password }} priv=keystone.*:ALL state=present
42 |
43 | - name: run keystone db_sync
44 | command: keystone-manage db_sync
45 | when: keystone.changed
46 |
47 | # XXX FIX ME XXX
48 | - name: restart debian keystone
49 | service: name=keystone state=restarted
50 | when: keystone.changed and ansible_os_family == 'Debian'
51 |
52 | - name: restart redhat keystone
53 | service: name=openstack-keystone state=restarted
54 | when: keystone.changed and ansible_os_family == 'RedHat'
55 |
56 | # This is for when a cluster is vagrant halted/started
57 | - name: ensure debian keystone is running
58 | service: name=keystone state=running
59 | when: ansible_os_family == 'Debian'
60 |
61 | - name: ensure redhat keystone is running
62 | service: name=openstack-keystone state=running
63 | when: ansible_os_family == 'RedHat'
64 |
65 | - name: wait for keystone to come back up
66 | wait_for: host="{{ keystone_server }}" port=35357
67 |
68 | #
69 | # Keystone endpoints
70 | #
71 |
72 | - name: create keystone identity endpoint
73 | keystone_service: insecure=yes name=keystone type=identity description="Keystone Identity Service" publicurl="https://{{ keystone_server }}:5000/v2.0" internalurl="https://{{ keystone_server }}:5000/v2.0" adminurl="https://{{ keystone_server }}:35357/v2.0" region={{ keystone_region }} token={{ keystone_admin_token }} endpoint="https://127.0.0.1:35357/v2.0"
74 |
75 | - name: create EC2 compatibility keystone service
76 |   keystone_service: insecure=yes name=ec2 type=ec2 description="EC2 Compatibility Layer" publicurl=https://{{ keystone_server }}:8773/services/Cloud internalurl=http://{{ keystone_server }}:8773/services/Cloud adminurl=http://{{ keystone_server }}:8773/services/Admin region={{ keystone_region }} token={{ keystone_admin_token }} endpoint="https://127.0.0.1:35357/v2.0"
77 |
78 | - name: create object storage keystone service
79 | keystone_service: insecure=yes name=swift type=object-store description="Object Storage Service" publicurl='https://{{ swift_proxy_ssl_proxy_server }}/v1/AUTH_$(tenant_id)s' internalurl='https://{{ swift_proxy_ssl_proxy_server }}/v1/AUTH_$(tenant_id)s' adminurl='https://{{ swift_proxy_ssl_proxy_server }}/v1' region={{ keystone_region }} token={{ keystone_admin_token }} endpoint="https://127.0.0.1:35357/v2.0"
80 |
81 | #
82 | # Create keystone tenants
83 | #
84 |
85 | - name: create service keystone tenant
86 | keystone_user: token={{ keystone_admin_token }} tenant=service tenant_description="Service Tenant" endpoint="https://127.0.0.1:35357/v2.0"
87 |
88 | - name: create admin keystone tenant
89 | keystone_user: token={{ keystone_admin_token }} tenant=admin tenant_description="Admin Tenant" endpoint="https://127.0.0.1:35357/v2.0"
90 |
91 | - name: create demo keystone tenant
92 | keystone_user: token={{ keystone_admin_token }} tenant=demo tenant_description="Default Tenant" endpoint="https://127.0.0.1:35357/v2.0"
93 |
94 | #
95 | # Create keystone users
96 | #
97 |
98 | - name: create admin keystone user
99 | keystone_user: token={{ keystone_admin_token }} user={{ keystone_admin_user }} tenant=demo password={{ keystone_admin_user_password }} endpoint="https://127.0.0.1:35357/v2.0"
100 |
101 | - name: create swift service keystone user
102 | keystone_user: token={{ keystone_admin_token }} user=swift tenant=service password={{ keystone_generic_service_password }} endpoint="https://127.0.0.1:35357/v2.0"
103 |
104 | - name: create ec2 service keystone user
105 | keystone_user: token={{ keystone_admin_token }} user=ec2 tenant=service password={{ keystone_generic_service_password }} endpoint="https://127.0.0.1:35357/v2.0"
106 |
107 | - name: create test keystone user
108 | keystone_user: token={{ keystone_admin_token }} user={{ keystone_test_user }} tenant=demo password={{ keystone_test_user_password }} endpoint="https://127.0.0.1:35357/v2.0"
109 |
110 | #
111 | # Keystone create and apply roles
112 | #
113 |
114 | - name: create an admin role and add the admin to it
115 | keystone_user: token={{ keystone_admin_token }} role={{ keystone_admin_role }} user={{ keystone_admin_user }} tenant={{ keystone_admin_tenant }} endpoint="https://127.0.0.1:35357/v2.0"
116 |
117 | - name: add the swift user to the admin role
118 | keystone_user: token={{ keystone_admin_token }} role={{ keystone_admin_role }} user=swift tenant=service endpoint="https://127.0.0.1:35357/v2.0"
119 |
120 | - name: add the ec2 user to the admin role
121 | keystone_user: token={{ keystone_admin_token }} role={{ keystone_admin_role }} user=ec2 tenant=service endpoint="https://127.0.0.1:35357/v2.0"
122 |
123 | - name: create a swiftoperator role and add the demo user to it
124 | keystone_user: token={{ keystone_admin_token }} role=swiftoperator user={{ keystone_test_user }} tenant=demo endpoint="https://127.0.0.1:35357/v2.0"
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/roles/authentication/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install required keystone packages
4 | apt: pkg={{ item }} state=installed update_cache=yes cache_valid_time=3600
5 | with_items: ubuntu_packages
6 |
7 | - name: create empty /var/log/keystone/keystone.log
8 | command: touch /var/log/keystone/keystone.log creates=/var/log/keystone/keystone.log
9 |
10 | - name: ensure /var/log/keystone/keystone.log has the correct privileges
11 | file: path=/var/log/keystone/keystone.log owner=keystone group=keystone
12 |
13 | - name: remove default keystone sqlite database file
14 | file: path=/var/lib/keystone/keystone.db state=absent
--------------------------------------------------------------------------------
/roles/authentication/tasks/debian_test.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Run a basic test to see if keystone is running
3 | #
4 |
5 | - name: test keystone on precise with user-list
6 | shell: keystone --insecure --token {{ keystone_admin_token }} --endpoint https://{{ keystone_server }}:35357/v2.0 user-list
7 | when: keystone.changed and ansible_distribution_release == "precise"
8 |
9 |
10 | #
11 | # keystone on trusty has commands changed to have "os" before option
12 | #
13 |
14 | - name: test keystone on trusty with user-list
15 | shell: keystone --insecure --os-token {{ keystone_admin_token }} --os-endpoint https://{{ keystone_server }}:35357/v2.0 user-list
16 | when: keystone.changed and ansible_distribution_release == "trusty"
17 |
--------------------------------------------------------------------------------
/roles/authentication/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Install and configure a keystone authentication server
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | - include: redhat.yml
11 | when: ansible_os_family == 'RedHat'
12 |
13 | #
14 | # Common
15 | #
16 |
17 | - include: common.yml
18 |
19 | #
20 | # Tests
21 | #
22 |
23 | - include: debian_test.yml
24 | when: ansible_os_family == 'Debian'
25 |
26 | - include: redhat_test.yml
27 | when: ansible_os_family == 'RedHat'
--------------------------------------------------------------------------------
/roles/authentication/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install required keystone packages
4 | yum: pkg={{ item }} state=installed
5 | with_items: redhat_packages
6 |
7 | - name: ensure mysql-server is running
8 | service: name=mysqld enabled=yes state=running
9 |
10 | - name: copy over /etc/keystone/logging.conf
11 | copy: src=redhat_logging.conf dest=/etc/keystone/logging.conf owner=keystone group=keystone mode=0640
12 | notify: restart keystone
13 |
14 | - name: create empty /var/log/keystone/error.log and access.log
15 | command: touch /var/log/keystone/{{ item }}.log creates=/var/log/keystone/{{ item }}.log
16 | with_items:
17 | - error
18 | - access
19 |
20 | - name: ensure error.log and access.log are writable by keystone
21 | file: path=/var/log/keystone/{{ item }}.log state=file owner=keystone group=keystone
22 | with_items:
23 | - error
24 | - access
25 |
26 | - name: ensure keystone can write to /var/log/keystone
27 | file: path=/var/log/keystone recurse=yes owner=keystone group=keystone
28 | notify: restart keystone
29 |
--------------------------------------------------------------------------------
/roles/authentication/tasks/redhat_test.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Run a basic test to see if keystone is running
3 | #
4 |
5 | - name: test keystone with user-list
6 | shell: keystone --insecure --token {{ keystone_admin_token }} --endpoint https://{{ keystone_server }}:35357/v2.0 user-list
7 | when: keystone.changed
8 |
--------------------------------------------------------------------------------
/roles/authentication/templates/keystone.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | bind_host = 0.0.0.0
3 | public_port = 5000
4 | admin_port = 35357
5 | admin_token = {{ keystone_admin_token }}
6 | compute_port = 8774
7 | verbose = True
8 | debug = True
9 | log_config = /etc/keystone/logging.conf
10 |
11 | # ================= Syslog Options ============================
12 | # Send logs to syslog (/dev/log) instead of to file specified
13 | # by `log-file`
14 | use_syslog = False
15 |
16 | # Facility to use. If unset defaults to LOG_USER.
17 | # syslog_log_facility = LOG_LOCAL0
18 |
19 | [ssl]
20 | enable = True
21 | certfile = /etc/keystone/ssl/certs/keystone.pem
22 | keyfile = /etc/keystone/ssl/private/keystonekey.pem
23 | ca_certs = /etc/keystone/ssl/certs/ca.pem
24 | ca_key = /etc/keystone/ssl/certs/cakey.pem
25 | # client cert, default is false
26 | #cert_required = False
27 | cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN={{ keystone_server }}
28 |
29 | [token]
30 | provider = keystone.token.providers.uuid.Provider
31 |
32 | [sql]
33 | connection = mysql://keystone:{{ keystone_mysql_password }}@127.0.0.1/keystone
34 | idle_timeout = 200
35 |
36 | [ldap]
37 | #url = ldap://localhost
38 | #tree_dn = dc=example,dc=com
39 | #user_tree_dn = ou=Users,dc=example,dc=com
40 | #role_tree_dn = ou=Roles,dc=example,dc=com
41 | #tenant_tree_dn = ou=Groups,dc=example,dc=com
42 | #user = dc=Manager,dc=example,dc=com
43 | #password = freeipa4all
44 | #suffix = cn=example,cn=com
45 |
46 | [identity]
47 | driver = keystone.identity.backends.sql.Identity
48 |
49 | [catalog]
50 | driver = keystone.catalog.backends.sql.Catalog
51 |
52 | [token]
53 | driver = keystone.token.backends.sql.Token
54 |
55 | # Amount of time a token should remain valid (in seconds)
56 | expiration = 86400
57 |
58 | [policy]
59 | driver = keystone.policy.backends.rules.Policy
60 |
61 | [ec2]
62 | driver = keystone.contrib.ec2.backends.sql.Ec2
63 |
64 | [filter:debug]
65 | paste.filter_factory = keystone.common.wsgi:Debug.factory
66 |
67 | [filter:token_auth]
68 | paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
69 |
70 | [filter:admin_token_auth]
71 | paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
72 |
73 | [filter:xml_body]
74 | paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
75 |
76 | [filter:json_body]
77 | paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
78 |
79 | [filter:crud_extension]
80 | paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
81 |
82 | [filter:ec2_extension]
83 | paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
84 |
85 | [app:public_service]
86 | paste.app_factory = keystone.service:public_app_factory
87 |
88 | [app:admin_service]
89 | paste.app_factory = keystone.service:admin_app_factory
90 |
91 | [pipeline:public_api]
92 | pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
93 |
94 | [pipeline:admin_api]
95 | pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service
96 |
97 | [app:public_version_service]
98 | paste.app_factory = keystone.service:public_version_app_factory
99 |
100 | [app:admin_version_service]
101 | paste.app_factory = keystone.service:admin_version_app_factory
102 |
103 | [pipeline:public_version_api]
104 | pipeline = xml_body public_version_service
105 |
106 | [pipeline:admin_version_api]
107 | pipeline = xml_body admin_version_service
108 |
109 | [composite:main]
110 | use = egg:Paste#urlmap
111 | /v2.0 = public_api
112 | / = public_version_api
113 |
114 | [composite:admin]
115 | use = egg:Paste#urlmap
116 | /v2.0 = admin_api
117 | / = admin_version_api
--------------------------------------------------------------------------------
/roles/authentication/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | redhat_packages:
4 | - openstack-keystone
5 | - mysql-server
6 |
7 | ubuntu_packages:
8 | - keystone
9 | - python-keyring
10 | - mysql-server
11 | - python-mysqldb
12 |
--------------------------------------------------------------------------------
/roles/common/files/CentOS-Base.repo:
--------------------------------------------------------------------------------
1 | # CentOS-Base.repo
2 | #
3 | # The mirror system uses the connecting IP address of the client and the
4 | # update status of each mirror to pick mirrors that are updated to and
5 | # geographically close to the client. You should use this for CentOS updates
6 | # unless you are manually picking other mirrors.
7 | #
8 | # If the mirrorlist= does not work for you, as a fall back you can try the
9 | # remarked out baseurl= line instead.
10 | #
11 | #
12 |
13 | [base]
14 | name=CentOS-$releasever - Base
15 | #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
16 | baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
17 | gpgcheck=1
18 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
19 |
20 | #released updates
21 | [updates]
22 | name=CentOS-$releasever - Updates
23 | #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
24 | baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
25 | gpgcheck=1
26 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
27 |
28 | #additional packages that may be useful
29 | [extras]
30 | name=CentOS-$releasever - Extras
31 | #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
32 | baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
33 | gpgcheck=1
34 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
35 |
36 | #additional packages that extend functionality of existing packages
37 | [centosplus]
38 | name=CentOS-$releasever - Plus
39 | #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
40 | baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
41 | gpgcheck=1
42 | enabled=0
43 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
44 |
45 | #contrib - packages by Centos Users
46 | [contrib]
47 | name=CentOS-$releasever - Contrib
48 | #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=contrib
49 | baseurl=http://mirror.centos.org/centos/$releasever/contrib/$basearch/
50 | gpgcheck=1
51 | enabled=0
52 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
--------------------------------------------------------------------------------
/roles/common/handlers/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # debian
--------------------------------------------------------------------------------
/roles/common/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
--------------------------------------------------------------------------------
/roles/common/handlers/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: yum makecache
4 | command: yum makecache
--------------------------------------------------------------------------------
/roles/common/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Stop portmapper
5 | #
6 |
7 | - name: ensure portmap is not running
8 | service: name=portmap state=stopped enabled=no
9 | ignore_errors: true
10 |
11 | #
12 | # NTP
13 | #
14 |
15 | - include: debian_ntp.yml
16 |
17 | #
18 | # Package cache
19 | #
20 |
21 | - include: debian_package_cache.yml
22 |
23 | #
24 | # Havana repository
25 | #
26 |
27 | - include: debian_openstack_repository.yml
28 |
29 |
--------------------------------------------------------------------------------
/roles/common/tasks/debian_ntp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # NTP
3 | #
4 |
5 | - name: install ntp package
6 | apt: pkg=ntp state=installed
7 |
8 | - name: set timezone to timezone_area/timezone_city
9 | template: src=timezone.j2 dest=/etc/timezone owner=root group=root mode=0644
10 | register: timezone
11 |
12 | - name: setup timezone link
13 | shell: ln -sf /usr/share/zoneinfo/{{timezone_area}}/{{timezone_city}} /etc/localtime
14 | when: timezone.changed
15 |
16 | - name: stop ntpd to run ntpdate
17 | service: name=ntp state=stopped
18 | when: timezone.changed
19 |
20 | - name: set time with ntpdate
21 | command: "ntpdate -s {{ time_server }}"
22 | when: timezone.changed
23 |
24 | - name: start ntpd
25 | service: name=ntp state=running enabled=yes
26 | when: timezone.changed
--------------------------------------------------------------------------------
/roles/common/tasks/debian_openstack_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install precise openstack havana apt repository
4 | apt_repository: repo='deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/havana main' state=present
5 | register: new_repo
6 | when: ansible_distribution_release == "precise"
7 |
8 | - name: update apt
9 | command: apt-get update
10 | when: new_repo.changed
--------------------------------------------------------------------------------
/roles/common/tasks/debian_package_cache.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: setup apt-cacher-ng proxy
4 | template: src=01proxy.j2 dest=/etc/apt/apt.conf.d/01proxy
5 | register: new_apt_cacher
6 |
7 | - name: install required packages to add havana repository
8 | apt: name={{ item }} state=installed update_cache=yes #cache_valid_time=3600
9 | with_items: ubuntu_packages
10 | when: new_apt_cacher.changed
--------------------------------------------------------------------------------
/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
--------------------------------------------------------------------------------
/roles/common/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # redhat
4 |
5 | - include: redhat_package_cache.yml
6 |
7 | - include: redhat_ntp.yml
8 |
9 | - include: redhat_openstack_repository.yml
10 |
11 | - name: stop iptables
12 | service: name=iptables state=stopped
13 |
--------------------------------------------------------------------------------
/roles/common/tasks/redhat_ntp.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ensure ntp is installed
4 | yum: name=ntp state=installed
5 |
6 | - name: set timezone in /etc/sysconfig/clock
7 | lineinfile: dest=/etc/sysconfig/clock regexp="^ZONE" line="ZONE={{timezone_area}}/{{timezone_city}}"
8 | register: timezone
9 |
10 | # XXX Could be in common... ###
11 | - name: setup timezone link
12 | shell: ln -sf /usr/share/zoneinfo/{{timezone_area}}/{{timezone_city}} /etc/localtime
13 | when: timezone.changed
14 |
15 | - name: ensure ntpd is running
16 | service: name=ntpd state=running
--------------------------------------------------------------------------------
/roles/common/tasks/redhat_openstack_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ensure yum-plugin-priorities is installed
4 | yum: name=yum-plugin-priorities state=installed
5 |
6 | - name: install RDO openstack repository
7 | command: yum install -y http://rdo.fedorapeople.org/rdo-release.rpm creates=/etc/yum.repos.d/rdo-release.repo
8 | #command: yum install -y http://repos.fedorapeople.org/repos/openstack/openstack-havana/rdo-release-havana-7.noarch.rpm
9 | notify:
10 | - yum makecache
11 |
12 | - name: install EPEL repository
13 | command: yum install -y http://fedora.mirror.nexicom.net/epel/6/i386/epel-release-6-8.noarch.rpm creates=/etc/yum.repos.d/epel.repo
14 | notify:
15 | - yum makecache
16 |
17 | - name: enable RDO repository
18 | lineinfile: dest=/etc/yum.repos.d/rdo-release.repo regexp=^enabled line="enabled=1"
19 |
--------------------------------------------------------------------------------
/roles/common/tasks/redhat_package_cache.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: configure proxy cache server for yum
4 | lineinfile: dest=/etc/yum.conf line="proxy=http://{{ package_cache_server }}:3128" insertafter="^\[main\]"
5 |
6 | # XXX Better way to remove this plugin? XXX
7 | - name: remove fastestmirror plugin
8 | file: path=/etc/yum/pluginconf.d/fastestmirror.conf state=absent
9 |
10 | - name: copy over new CentOS-Base.repo file
11 | copy: src=CentOS-Base.repo dest=/etc/yum.repos.d/CentOS-Base.repo
12 | notify:
13 | - yum makecache
14 |
--------------------------------------------------------------------------------
/roles/common/templates/01proxy.j2:
--------------------------------------------------------------------------------
1 | Acquire::http { Proxy "http://{{ package_cache_server }}:3142"; };
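2 | // Rendered example (illustrative only; package_cache_server is defined elsewhere,
3 | // e.g. in group_vars, and the IP below is made up):
4 | //   Acquire::http { Proxy "http://192.168.100.20:3142"; };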
--------------------------------------------------------------------------------
/roles/common/templates/timezone.j2:
--------------------------------------------------------------------------------
1 | {{ timezone_area }}/{{timezone_city}}
--------------------------------------------------------------------------------
/roles/common/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ubuntu_packages:
4 | - ubuntu-cloud-keyring
5 | - python-pycurl
--------------------------------------------------------------------------------
/roles/lbssl/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart pound
4 | service: name=pound state=restarted
--------------------------------------------------------------------------------
/roles/lbssl/tasks/common.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: common_pound_ssl.yml
4 | - include: common_pound_service.yml
5 |
6 |
--------------------------------------------------------------------------------
/roles/lbssl/tasks/common_pound_service.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ensure pound is running
4 | service: name=pound state=running
--------------------------------------------------------------------------------
/roles/lbssl/tasks/common_pound_ssl.yml:
--------------------------------------------------------------------------------
1 |
2 | #
3 | # Create an SSL certificate for pound to use
4 | #
5 |
6 | # XXX FIX ME - ansible_fqdn? XXX
7 | - name: create self-signed SSL cert for pound
8 | command: openssl req -new -nodes -x509 -subj "/C=US/ST=Oregon/L=Portland/O=IT/CN={{ ansible_eth1.ipv4.address }}" -days 3650 -keyout /etc/pound/server.key -out /etc/pound/server.crt -extensions v3_ca creates=/etc/pound/server.crt
9 | register: new_cert
10 | notify: restart pound
11 |
12 | - name: verify cert file
13 | command: openssl x509 -in /etc/pound/server.crt -text
14 | when: new_cert.changed
15 |
16 | - name: create a pem file
17 | command: openssl x509 -in /etc/pound/server.crt -out /etc/pound/server.pem
18 | when: new_cert.changed
19 |
20 | - name: add server.key to server.pem file
21 | shell: openssl rsa -in /etc/pound/server.key >> /etc/pound/server.pem
22 | when: new_cert.changed
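23 |
24 | # Illustrative manual checks (not run by the play) of the generated cert and key:
25 | #   openssl x509 -in /etc/pound/server.pem -noout -subject -dates
26 | #   openssl rsa -in /etc/pound/server.key -check -noout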
--------------------------------------------------------------------------------
/roles/lbssl/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Install and configure pound to be ssl termination for swift proxy
5 | #
6 |
7 | - name: install required packages for lbssl
8 | apt: pkg={{ item }} state=installed update_cache=yes cache_valid_time=3600
9 | with_items: ubuntu_packages
10 |
11 | - name: set /etc/default/pound to allow pound to start
12 | template: src=default_pound.j2 dest=/etc/default/pound
13 |
14 | - name: copy over pound.cfg
15 | template: src=ubuntu_pound.cfg.j2 dest=/etc/pound/pound.cfg owner=root group=root mode=0644
16 | notify: restart pound
--------------------------------------------------------------------------------
/roles/lbssl/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
16 |
17 | - include: common.yml
--------------------------------------------------------------------------------
/roles/lbssl/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # redhat
4 |
5 |
6 | # For some reason the pound package is capitalized
7 | - name: ensure required packages are installed
8 | yum: name={{ item }} state=installed
9 | with_items: redhat_packages
10 |
11 | - name: ensure there is a /etc/pound directory
12 | file: dest=/etc/pound state=directory owner=pound group=pound mode=0750
13 |
14 | - name: remove default /etc/pound.cfg file
15 | file: path=/etc/pound.cfg state=absent
16 |
17 | - name: copy over pound.cfg
18 | template: src=redhat_pound.cfg.j2 dest=/etc/pound/pound.cfg owner=root group=root mode=0644
19 | notify: restart pound
20 |
21 | - name: link /etc/pound/pound.cfg to /etc/pound.cfg
22 | file: src=/etc/pound/pound.cfg dest=/etc/pound.cfg state=link
23 |
--------------------------------------------------------------------------------
/roles/lbssl/templates/default_pound.j2:
--------------------------------------------------------------------------------
1 | # Defaults for pound initscript
2 | # sourced by /etc/init.d/pound
3 | # installed at /etc/default/pound by the maintainer scripts
4 |
5 | # prevent startup with default configuration
6 | # set the below variable to 1 in order to allow pound to start
7 | startup=1
--------------------------------------------------------------------------------
/roles/lbssl/templates/redhat_pound.cfg.j2:
--------------------------------------------------------------------------------
1 | ## see pound(8) for details
2 | daemon 1
3 | ######################################################################
4 | ## global options:
5 | User "pound"
6 | Group "pound"
7 | #RootJail "/chroot/pound"
8 | ## Logging: (goes to syslog by default)
9 | ## 0 no logging
10 | ## 1 normal
11 | ## 2 extended
12 | ## 3 Apache-style (common log format)
13 | LogLevel 0
14 | ## turn on dynamic scaling (off by default)
15 | # Dyn Scale 1
16 | ## check backend every X secs:
17 | Alive 30
18 | ## client timeout
19 | #Client 10
20 | ## allow 10 second proxy connect time
21 | ConnTO 10
22 | ## use hardware-acceleration card supported by openssl(1):
23 | #SSLEngine "aesni"
24 | # poundctl control socket
25 | Control "/var/lib/pound/pound.cfg"
26 | ######################################################################
27 | ## listen, redirect and ... to:
28 | ## redirect all swift requests on port 443 to local swift proxy
29 | ListenHTTPS
30 | Address 0.0.0.0
31 | Port 443
32 | Cert "/etc/pound/server.pem"
33 | ## Certs to accept from clients
34 | ## CAlist "CA_file"
35 | ## Certs to use for client verification
36 | ## VerifyList "Verify_file"
37 | ## Request client cert - don't verify
38 | ## Ciphers "AES256-SHA"
39 | ## behave like an HTTP/1.0 server towards HTTPS clients? (0 allows HTTP/1.1):
40 | NoHTTPS11 0
41 | ## allow PUT and DELETE also (by default only GET, POST and HEAD)?:
42 | xHTTP 1
43 | Service
44 | BackEnd
45 | ## XXX FIX ME - should be a variable XXX
46 | Address {{ hostvars[groups['proxy'][0]]['ansible_eth2']['ipv4']['address'] }}
47 | Port 8080
48 | End
49 | End
50 | End
--------------------------------------------------------------------------------
/roles/lbssl/templates/ubuntu_pound.cfg.j2:
--------------------------------------------------------------------------------
1 | ## see pound(8) for details
2 | daemon 1
3 | ######################################################################
4 | ## global options:
5 | User "www-data"
6 | Group "www-data"
7 | #RootJail "/chroot/pound"
8 | ## Logging: (goes to syslog by default)
9 | ## 0 no logging
10 | ## 1 normal
11 | ## 2 extended
12 | ## 3 Apache-style (common log format)
13 | LogLevel 0
14 | ## turn on dynamic scaling (off by default)
15 | # Dyn Scale 1
16 | ## check backend every X secs:
17 | Alive 30
18 | ## client timeout
19 | #Client 10
20 | ## allow 10 second proxy connect time
21 | ConnTO 10
22 | ## use hardware-acceleration card supported by openssl(1):
23 | #SSLEngine "aesni"
24 | # poundctl control socket
25 | Control "/var/run/pound/poundctl.socket"
26 | ######################################################################
27 | ## listen, redirect and ... to:
28 | ## redirect all swift requests on port 443 to local swift proxy
29 | ListenHTTPS
30 | Address 0.0.0.0
31 | Port 443
32 | Cert "/etc/pound/server.pem"
33 | ## Certs to accept from clients
34 | ## CAlist "CA_file"
35 | ## Certs to use for client verification
36 | ## VerifyList "Verify_file"
37 | ## Request client cert - don't verify
38 | ## Ciphers "AES256-SHA"
39 | ## behave like an HTTP/1.0 server towards HTTPS clients? (0 allows HTTP/1.1):
40 | NoHTTPS11 0
41 | ## allow PUT and DELETE also (by default only GET, POST and HEAD)?:
42 | xHTTP 1
43 | Service
44 | BackEnd
45 | ## XXX FIX ME - should be a variable XXX
46 | Address {{ hostvars[groups['proxy'][0]]['ansible_eth2']['ipv4']['address'] }}
47 | Port 8080
48 | End
49 | End
50 | End
51 |
--------------------------------------------------------------------------------
/roles/lbssl/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Yup -- capital P on Pound for redhat
4 | redhat_packages:
5 | - Pound
6 | - openssl
7 |
8 | ubuntu_packages:
9 | - pound
10 | - openssl
11 |
--------------------------------------------------------------------------------
/roles/package_cache/handlers/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart apt-cacher-ng
4 | service: name=apt-cacher-ng state=restarted
--------------------------------------------------------------------------------
/roles/package_cache/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 |
4 | #
5 | # Debian
6 | #
7 |
8 | - include: debian.yml
9 | when: ansible_os_family == 'Debian'
10 |
11 | #
12 | # Redhat
13 | #
14 |
15 | - include: redhat.yml
16 | when: ansible_os_family == 'RedHat'
--------------------------------------------------------------------------------
/roles/package_cache/handlers/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # redhat handlers
4 |
5 | - name: restart squid
6 | service: name=squid state=restarted
--------------------------------------------------------------------------------
/roles/package_cache/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: debian_install.yml
4 | - include: debian_configure.yml
5 | - include: debian_service.yml
6 |
--------------------------------------------------------------------------------
/roles/package_cache/tasks/debian_configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Fully quoted lineinfile due to yaml bug: http://www.ansibleworks.com/docs/modules.html#lineinfile
4 | - name: set bind address in apt-cacher-ng configuration file
5 | lineinfile: "dest=/etc/apt-cacher-ng/acng.conf regexp='^# BindAddress' insertafter=yes line='BindAddress: 0.0.0.0' state=present"
6 | notify:
7 | - restart apt-cacher-ng
8 |
--------------------------------------------------------------------------------
/roles/package_cache/tasks/debian_install.yml:
--------------------------------------------------------------------------------
1 | - name: install required package_cache packages
2 | apt: pkg={{ item }} state=installed update_cache=yes cache_valid_time=3600
3 | with_items:
4 | - apt-cacher-ng
--------------------------------------------------------------------------------
/roles/package_cache/tasks/debian_service.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ensure apt-cacher-ng is running
4 | service: name=apt-cacher-ng state=running
--------------------------------------------------------------------------------
/roles/package_cache/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
16 |
--------------------------------------------------------------------------------
/roles/package_cache/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: redhat_install.yml
4 | - include: redhat_configure.yml
5 | - include: redhat_service.yml
6 |
--------------------------------------------------------------------------------
/roles/package_cache/tasks/redhat_configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: set a cache_dir in squid.conf
4 | lineinfile: dest=/etc/squid/squid.conf line="cache_dir ufs /var/spool/squid 7000 16 256" insertafter="^#cache_dir"
5 | notify:
6 | - restart squid
7 |
8 | - name: set maximum_object_size in squid.conf
9 | lineinfile: dest=/etc/squid/squid.conf line="maximum_object_size 512000 KB"
10 | notify:
11 | - restart squid
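12 |
13 | # For reference: in "cache_dir ufs /var/spool/squid 7000 16 256" the numbers are
14 | # the cache size in MB (7000) followed by the number of first- and second-level
15 | # cache subdirectories (16 and 256); the large maximum_object_size is what lets
16 | # squid cache big rpm packages.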
--------------------------------------------------------------------------------
/roles/package_cache/tasks/redhat_install.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install squid to cache rpm packages
4 | yum: pkg=squid state=installed
--------------------------------------------------------------------------------
/roles/package_cache/tasks/redhat_service.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ensure squid is running
4 | service: name=squid state=started enabled=yes
--------------------------------------------------------------------------------
/roles/proxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart memcached
4 | service: name=memcached state=restarted
5 |
6 | # Probably can't restart the proxy on first install
7 | - name: restart proxy-server
8 | service: name=swift-proxy state=restarted
--------------------------------------------------------------------------------
/roles/proxy/tasks/common.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: make sure memcached is running
4 | service: name=memcached state=running
5 |
6 | - name: ensure permissions on /var/cache/swift
7 | file: path=/var/cache/swift state=directory group=swift owner=swift mode=0700
8 |
9 | # Note: Can't start the proxy yet
10 | - name: copy over proxy-server.conf
11 | template: src=proxy-server.conf.j2 dest=/etc/swift/proxy-server.conf owner=swift group=swift mode=0640
12 |
13 | - name: copy over object-expirer.conf
14 | template: src=object-expirer.conf.j2 dest=/etc/swift/object-expirer.conf owner=swift group=swift mode=0640
15 |
16 | #
17 | # Build rings
18 | #
19 |
20 | - name: run swift-ring-builder for accounts, containers, and objects
21 | command: swift-ring-builder {{ item }}.builder create {{ partition_power }} {{ replicas }} {{ min_part_hours }}
22 | chdir=/etc/swift
23 | creates=/etc/swift/{{ item }}.ring.gz
24 | with_items:
25 | - account
26 | - container
27 | - object
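28 |
29 | # Illustration only: with, say, partition_power=12, replicas=3 and
30 | # min_part_hours=1 (these values come from the group_vars, not this role),
31 | # the task above runs e.g.:
32 | #   swift-ring-builder account.builder create 12 3 1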
--------------------------------------------------------------------------------
/roles/proxy/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install required proxy packages
4 | apt: name={{ item }} state=installed update_cache=yes cache_valid_time=3600
5 | with_items: ubuntu_packages
6 |
7 | - include: debian_memcached_config.yml
--------------------------------------------------------------------------------
/roles/proxy/tasks/debian_memcached_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: tell memcached what IP to listen on
4 | lineinfile: dest=/etc/memcached.conf regexp="^-l" line='-l {{ ansible_eth3.ipv4.address }}'
5 | notify: restart memcached
--------------------------------------------------------------------------------
/roles/proxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
16 |
17 |
18 | - include: common.yml
19 |
--------------------------------------------------------------------------------
/roles/proxy/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install required proxy packages
4 | yum: name={{ item }} state=installed
5 | with_items: redhat_packages
6 |
7 |
--------------------------------------------------------------------------------
/roles/proxy/tasks/redhat_memcached_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: tell memcached what IP to listen on
4 | lineinfile: dest=/etc/sysconfig/memcached regexp="^OPTIONS" line='OPTIONS="-l {{ ansible_eth3.ipv4.address }}"'
5 | notify:
6 | - restart memcached
--------------------------------------------------------------------------------
/roles/proxy/templates/object-expirer.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | # swift_dir = /etc/swift
3 | # user = swift
4 | # You can specify default log routing here if you want:
5 | # log_name = swift
6 | # log_facility = LOG_LOCAL0
7 | # log_level = INFO
8 |
9 | [object-expirer]
10 | interval = 300
11 |
12 | [pipeline:main]
13 | pipeline = catch_errors cache proxy-server
14 |
15 | [app:proxy-server]
16 | use = egg:swift#proxy
17 | # See proxy-server.conf-sample for options
18 | memcache_servers = {{ ansible_eth3.ipv4.address }}:11211
19 |
20 | [filter:cache]
21 | use = egg:swift#memcache
22 | # See proxy-server.conf-sample for options
23 |
24 | [filter:catch_errors]
25 | use = egg:swift#catch_errors
26 | # See proxy-server.conf-sample for options
27 |
--------------------------------------------------------------------------------
/roles/proxy/templates/proxy-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | # XXX Fix these certs XXX
3 | #cert_file = /etc/swift/cert.crt
4 | #key_file = /etc/swift/cert.key
5 | # This is 8888 in official havana doc
6 | bind_ip = {{ ansible_eth2.ipv4.address }}
7 | bind_port = 8080
8 | # is not in official havana example
9 | #workers = 32
10 | user = swift
11 |
12 | [pipeline:main]
13 | pipeline = catch_errors healthcheck cache ratelimit authtoken keystoneauth proxy-server
14 |
15 | [app:proxy-server]
16 | use = egg:swift#proxy
17 | allow_account_management = true
18 | account_autocreate = true
19 |
20 | [filter:keystoneauth]
21 | use = egg:swift#keystoneauth
22 | operator_roles = Member, admin, swiftoperator
23 |
24 | [filter:authtoken]
25 | paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
26 | signing_dir = /var/cache/swift
27 | auth_host = {{ keystone_server }}
28 | auth_port = 35357
29 | #auth_protocol = http
30 | auth_protocol = https
31 | # is not in official havana example
32 | #auth_uri = http://{{ keystone_server }}:5000
33 | admin_tenant_name = service
34 | admin_user = swift
35 | admin_password = {{ keystone_generic_service_password }}
36 | #delay_auth_decision = 10
37 | # is set to true in the official havana documentation
38 | delay_auth_decision = true
39 | insecure = true
40 |
41 | [filter:cache]
42 | use = egg:swift#memcache
43 | memcache_servers = {{ ansible_eth3.ipv4.address }}:11211
44 |
45 |
46 | [filter:catch_errors]
47 | use = egg:swift#catch_errors
48 |
49 | [filter:healthcheck]
50 | use = egg:swift#healthcheck
51 |
52 | [filter:ratelimit]
53 | use = egg:swift#ratelimit
54 | clock_accuracy = 1000
55 | max_sleep_time_seconds = 60
56 | log_sleep_time_seconds = 0
57 | rate_buffer_seconds = 5
58 | account_ratelimit = 0
59 |
--------------------------------------------------------------------------------
/roles/proxy/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ubuntu_packages:
4 | - swift-proxy
5 | - swift-object
6 | - memcached
7 | - python-keystoneclient
8 | - python-swiftclient
9 | - swift-plugin-s3
10 | - python-netifaces
11 | - python-xattr
12 | - python-memcache
13 |
14 | redhat_packages:
15 | - openstack-swift-proxy
16 | - memcached
--------------------------------------------------------------------------------
/roles/storage/files/redhat_rsync.init:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Source function library.
4 | . /etc/rc.d/init.d/functions
5 |
6 | [ -f /usr/bin/rsync ] || exit 0
7 |
8 | case "$1" in
9 | start)
10 | action "Starting rsyncd: " /usr/bin/rsync --daemon
11 | ;;
12 | stop)
13 | action "Stopping rsyncd: " killall rsync
14 | ;;
15 | *)
16 | echo "Usage: rsyncd {start|stop}"
17 | exit 1
18 | esac
19 | exit 0
--------------------------------------------------------------------------------
/roles/storage/tasks/common.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: make sure /srv/node exists
3 | file: path=/srv/node state=directory
4 |
5 | - name: make sure /var/swift/recon exists
6 | file: path=/var/swift/recon state=directory owner=swift group=swift mode=0750
7 |
8 | - name: check if losetup loop1 is already up
9 | shell: mount | grep "loop1 "
10 | register: losetup
11 | ignore_errors: True
12 | changed_when: losetup.rc > 0
13 | tags:
14 | - remount_loop_devices
15 |
16 | #
17 | # Remove disks
18 | #
19 |
20 | - name: umount swift disks
21 | mount: name=/srv/node/{{ disk_prefix }}{{ item }} state=unmounted src=/dev/loop{{ item }} fstype=xfs
22 | with_sequence: count={{ disks }}
23 | when: "losetup.rc > 0"
24 |
25 | - name: remove /srv/node/* disk device directories
26 | file: state=absent path=/srv/node/{{ disk_prefix }}{{ item }}
27 | with_sequence: count={{ disks }}
28 | when: "losetup.rc > 0"
29 |
30 | #- name: remove loop devices
31 | # command: losetup -d /dev/loop{{ item }}
32 | # with_sequence: count={{ disks }}
33 | # when: "losetup.rc > 0"
34 |
35 | - name: remove sparse images
36 | file: state=absent path=/var/tmp/{{ disk_prefix }}{{ item }}.img
37 | with_sequence: count={{ disks }}
38 | when: "losetup.rc > 0"
39 |
40 | #
41 | # Create disks
42 | #
43 |
44 | - name: create sparse images
45 | command: truncate --size {{ loop_disk_size }}G /var/tmp/{{ disk_prefix }}{{ item }}.img
46 | with_sequence: count={{ disks }}
47 | when: "losetup.rc > 0"
48 |
49 | - name: setup loop devices
50 | command: losetup /dev/loop{{ item }} /var/tmp/{{ disk_prefix }}{{ item }}.img
51 | with_sequence: count={{ disks }}
52 | when: "losetup.rc > 0"
53 | tags:
54 | - remount_loop_devices
55 |
56 | - name: create file system on loop devices
57 | shell: mkfs.xfs -i size=1024 /dev/loop{{ item }}; mkdir /srv/node/{{ disk_prefix }}{{ item }};
58 | with_sequence: count={{ disks }}
59 | when: "losetup.rc > 0"
60 |
61 | - name: make /srv/node disk device directories
62 | file: state=directory path=/srv/node/{{ disk_prefix }}{{ item }} owner=swift group=swift mode=0770
63 | with_sequence: count={{ disks }}
64 | when: "losetup.rc > 0"
65 |
66 | # NOTE: You would likely not want nobootwait in production, but in VirtualBox the loop devices are not set up yet at boot time, so the node cannot reboot cleanly without it if the mount points are in /etc/fstab
67 | # NOTE: nobootwait was later removed because it did not work on RedHat
68 | - name: mount swift disks
69 | mount: name=/srv/node/{{ disk_prefix }}{{ item }} state=mounted src=/dev/loop{{ item }} opts="noatime,nodiratime,nobarrier" fstype=xfs
70 | with_sequence: count={{ disks }}
71 | when: "losetup.rc > 0"
72 | tags:
73 | - remount_loop_devices
74 |
75 | - name: set permissions on /srv/node/* after being mounted
76 | file: state=directory path=/srv/node/{{ disk_prefix }}{{ item }} owner=swift group=swift mode=0770
77 | with_sequence: count={{ disks }}
78 | when: "losetup.rc > 0"
79 | tags:
80 | - remount_loop_devices
81 |
82 | #
83 | # Configure rsync
84 | #
85 |
86 | - name: copy over rsyncd.conf to swift storage
87 | template: src=rsyncd.conf.j2 dest=/etc/rsyncd.conf
88 |
89 | - name: make sure rsync is running
90 | service: name=rsync state=started
91 |
92 | #
93 | # Configure swift storage processes
94 | #
95 |
96 | - name: ensure *-server directories exist in /etc/swift
97 | file: path=/etc/swift/{{ item }}-server state=directory owner=swift group=swift mode=0755
98 | with_items:
99 | - account
100 | - container
101 | - object
102 |
103 | - name: copy over *-server.conf files
104 | template: src={{ item }}-server.conf.j2 dest=/etc/swift/{{ item }}-server/{{ item }}-server.conf owner=swift group=swift mode=0644
105 | with_items:
106 | - account
107 | - container
108 | - object
109 |
110 | - name: copy over *-replication.conf files
111 | template: src={{ item }}-replication.conf.j2 dest=/etc/swift/{{ item }}-server/{{ item }}-replication.conf owner=swift group=swift mode=0644
112 | with_items:
113 | - account
114 | - container
115 | - object
116 |
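117 |
118 | # Note on the "disks" above (illustration only): each sparse file
119 | # /var/tmp/<disk_prefix><N>.img is attached to /dev/loop<N>, formatted as XFS
120 | # and mounted at /srv/node/<disk_prefix><N>, which is the device name the
121 | # ring entries refer to.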
--------------------------------------------------------------------------------
/roles/storage/tasks/common_build_rings.yml:
--------------------------------------------------------------------------------
1 |
2 | #
3 | # Create rings - NOTE: these tasks are delegated to the swift proxy(s)
4 | #
5 |
6 | #
7 | # Check to see if the ring file already exists.
8 | # Note that this only checks for account.ring.gz...
9 | #
10 | # This is necessary in case the cluster is halted.
11 | #
12 | - name: check if account.ring.gz already exists
13 | #stat: path=/etc/swift/{{ item }}.ring.gz
14 | command: ls /etc/swift/account.ring.gz
15 | register: rings_exist
16 | ignore_errors: true
17 | changed_when: rings_exist.rc > 0
18 |
19 | - debug: msg="{{ rings_exist.rc }}"
20 |
21 | - name: build account ring
22 | command: swift-ring-builder account.builder add r{{ region }}z{{ zone }}-{{ ansible_eth3.ipv4.address }}:6002R{{ ansible_eth4.ipv4.address }}:6002/{{ disk_prefix }}{{ item }} 100
23 | chdir=/etc/swift
24 | delegate_to: "{{ swift_proxy_server }}"
25 | with_sequence: count={{ disks }}
26 | when: losetup.rc >0 and rings_exist.rc > 0
27 |
28 |
29 | - name: build container ring
30 | command: swift-ring-builder container.builder add r{{ region }}z{{ zone }}-{{ ansible_eth3.ipv4.address }}:6001R{{ ansible_eth4.ipv4.address }}:6001/{{ disk_prefix }}{{ item }} 100
31 | chdir=/etc/swift
32 | delegate_to: "{{ swift_proxy_server }}"
33 | with_sequence: count={{ disks }}
34 | when: losetup.rc >0 and rings_exist.rc > 0
35 |
36 | - name: build object ring
37 | command: swift-ring-builder object.builder add r{{ region }}z{{ zone }}-{{ ansible_eth3.ipv4.address }}:6000R{{ ansible_eth4.ipv4.address }}:6000/{{ disk_prefix }}{{ item }} 100
38 | chdir=/etc/swift
39 | delegate_to: "{{ swift_proxy_server }}"
40 | with_sequence: count={{ disks }}
41 | when: losetup.rc >0 and rings_exist.rc > 0
42 |
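43 | # Illustration only: the add syntax above is
44 | #   swift-ring-builder <x>.builder add r<region>z<zone>-<storage ip>:<port>R<replication ip>:<port>/<device> <weight>
45 | # so with example values region=1, zone=1, disk_prefix=sdb and item=1 it would
46 | # run something like:
47 | #   swift-ring-builder account.builder add r1z1-192.168.100.61:6002R192.168.200.61:6002/sdb1 100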
--------------------------------------------------------------------------------
/roles/storage/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Install swift storage
5 | #
6 |
7 | - name: install required packages for swift storage
8 | apt: pkg={{ item }} state=installed update_cache=yes cache_valid_time=3600
9 | with_items: ubuntu_packages
10 |
11 | - name: enable rsync in /etc/default/rsync
12 | lineinfile: dest=/etc/default/rsync regexp=^RSYNC_ENABLE line="RSYNC_ENABLE=true"
13 |
14 | - name: set the rsync listen address in /etc/default/rsync
15 | lineinfile: dest=/etc/default/rsync regexp=^RSYNC_OPTS line="RSYNC_OPTS='--address {{ ansible_eth3.ipv4.address }}'"
16 |
--------------------------------------------------------------------------------
/roles/storage/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
16 |
17 | - include: common.yml
18 | - include: common_build_rings.yml
19 |
--------------------------------------------------------------------------------
/roles/storage/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Install swift storage
5 | #
6 |
7 | - name: install required packages for swift storage
8 | yum: pkg={{ item }} state=installed
9 | with_items: redhat_packages
10 |
11 | - name: copy rsync init script to /etc/init.d/rsync
12 | copy: src=redhat_rsync.init dest=/etc/init.d/rsync owner=root group=root mode=0755
--------------------------------------------------------------------------------
/roles/storage/templates/account-replication.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | devices = /srv/node
3 | bind_ip = {{ ansible_eth4.ipv4.address }}
4 | workers = 2
5 |
6 | [pipeline:main]
7 | pipeline = account-server
8 |
9 | [app:account-server]
10 | use = egg:swift#account
11 | replication_server = True
12 |
13 | [account-replicator]
14 |
15 | [account-auditor]
16 |
17 | [account-reaper]
--------------------------------------------------------------------------------
/roles/storage/templates/account-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | devices = /srv/node
3 | bind_ip = {{ ansible_eth3.ipv4.address }}
4 | workers = 2
5 |
6 | [pipeline:main]
7 | pipeline = account-server
8 |
9 | [app:account-server]
10 | use = egg:swift#account
11 |
12 | [account-replicator]
13 |
14 | [account-auditor]
15 |
16 | [account-reaper]
--------------------------------------------------------------------------------
/roles/storage/templates/container-replication.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | devices = /srv/node
3 | bind_ip = {{ ansible_eth4.ipv4.address }}
4 | workers = 2
5 |
6 | [pipeline:main]
7 | pipeline = container-server
8 |
9 | [app:container-server]
10 | use = egg:swift#container
11 | replication_server = True
12 |
13 | [container-replicator]
14 |
15 | [container-updater]
16 |
17 | [container-auditor]
18 |
19 | [container-sync]
20 |
--------------------------------------------------------------------------------
/roles/storage/templates/container-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | devices = /srv/node
3 | bind_ip = {{ ansible_eth3.ipv4.address }}
4 | workers = 2
5 |
6 | [pipeline:main]
7 | pipeline = container-server
8 |
9 | [app:container-server]
10 | use = egg:swift#container
11 |
12 | [container-replicator]
13 |
14 | [container-updater]
15 |
16 | [container-auditor]
17 |
18 | [container-sync]
19 |
--------------------------------------------------------------------------------
/roles/storage/templates/object-replication.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | devices = /srv/node
3 | bind_ip = {{ ansible_eth4.ipv4.address }}
4 | workers = 2
5 |
6 | [pipeline:main]
7 | pipeline = object-server
8 |
9 | [app:object-server]
10 | use = egg:swift#object
11 | replication_server = True
12 |
13 | [object-replicator]
14 |
15 | [object-updater]
16 |
17 | [object-auditor]
18 |
19 | [object-expirer]
--------------------------------------------------------------------------------
/roles/storage/templates/object-server.conf.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | devices = /srv/node
3 | bind_ip = {{ ansible_eth3.ipv4.address }}
4 | workers = 2
5 |
6 | [pipeline:main]
7 | pipeline = object-server
8 |
9 | [app:object-server]
10 | use = egg:swift#object
11 |
12 | [object-replicator]
13 |
14 | [object-updater]
15 |
16 | [object-auditor]
17 |
18 | [object-expirer]
--------------------------------------------------------------------------------
/roles/storage/templates/rsyncd.conf.j2:
--------------------------------------------------------------------------------
1 | uid = swift
2 | gid = swift
3 | log file = /var/log/rsyncd.log
4 | pid file = /var/run/rsyncd.pid
5 | address = {{ ansible_eth3.ipv4.address }}
6 |
7 | [account]
8 | max connections = 2
9 | path = /srv/node/
10 | read only = false
11 | lock file = /var/lock/account.lock
12 |
13 | [container]
14 | max connections = 2
15 | path = /srv/node/
16 | read only = false
17 | lock file = /var/lock/container.lock
18 |
19 | [object]
20 | max connections = 2
21 | path = /srv/node/
22 | read only = false
23 | lock file = /var/lock/object.lock
--------------------------------------------------------------------------------
/roles/storage/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ubuntu_packages:
4 | - swift-account
5 | - swift-container
6 | - swift-object
7 | - xfsprogs
8 | - parted
9 |
10 | redhat_packages:
11 | - openstack-swift-account
12 | - openstack-swift-container
13 | - openstack-swift-object
14 | - xfsprogs
15 | - parted
--------------------------------------------------------------------------------
/roles/swift_common/tasks/common.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: ensure /etc/swift exists
4 | file: path=/etc/swift owner=swift group=swift mode=0750 state=directory
5 |
6 | - name: copy over swift.conf file
7 | template: src=swift.conf.j2 dest=/etc/swift/swift.conf owner=swift group=swift mode=0600
--------------------------------------------------------------------------------
/roles/swift_common/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install required packages for swift_common
4 | apt: pkg={{ item }} state=installed update_cache=yes #cache_valid_time=3600
5 | with_items: ubuntu_packages
6 |
--------------------------------------------------------------------------------
/roles/swift_common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Debian
5 | #
6 |
7 | - include: debian.yml
8 | when: ansible_os_family == 'Debian'
9 |
10 | #
11 | # Redhat
12 | #
13 |
14 | - include: redhat.yml
15 | when: ansible_os_family == 'RedHat'
16 |
17 |
18 | - include: common.yml
--------------------------------------------------------------------------------
/roles/swift_common/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install required packages for swift_common
4 | yum: pkg={{ item }} state=installed
5 | with_items: redhat_packages
6 |
--------------------------------------------------------------------------------
/roles/swift_common/templates/swift.conf.j2:
--------------------------------------------------------------------------------
1 | [swift-hash]
2 |
3 | swift_hash_path_suffix = {{ swift_hash_path_suffix }}
4 | swift_hash_path_prefix = {{ swift_hash_path_prefix }}
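5 |
6 | # Note: both values must be identical on every node in the cluster and treated
7 | # as secrets; changing them after data has been stored makes existing objects
8 | # unreachable.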
--------------------------------------------------------------------------------
/roles/swift_common/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ubuntu_packages:
4 | - swift
5 | - python-swiftclient
6 | - openssh-server
7 | - rsync
8 |
9 | redhat_packages:
10 | - openstack-swift
11 | - rsync
12 | - python-swiftclient
--------------------------------------------------------------------------------
/roles/swiftclient/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: copy over testrc file
4 | template: src=testrc.j2 dest=/home/{{ ansible_env.SUDO_USER }}/testrc
5 |
6 | - name: copy over adminrc file
7 | template: src=adminrc.j2 dest=/home/{{ ansible_env.SUDO_USER }}/adminrc
--------------------------------------------------------------------------------
/roles/swiftclient/templates/adminrc.j2:
--------------------------------------------------------------------------------
1 | export OS_SERVICE_ENDPOINT=https://{{ keystone_server }}:35357/v2.0
2 | export OS_USERNAME={{ keystone_admin_user }}
3 | export OS_SERVICE_TOKEN={{ keystone_admin_token }}
4 |
5 |
--------------------------------------------------------------------------------
/roles/swiftclient/templates/testrc.j2:
--------------------------------------------------------------------------------
1 | export OS_AUTH_URL=https://{{ keystone_server }}:35357/v2.0
2 | export OS_USERNAME={{ keystone_test_user }}
3 | export OS_PASSWORD={{ keystone_test_user_password }}
4 | export OS_TENANT_NAME=demo
5 | export OS_REGION_NAME={{ keystone_region }}
6 | export SWIFTCLIENT_INSECURE=true
7 |
--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # First setup a package_cache, especially when using
5 | # your laptop and Vagrant
6 | #
7 |
8 | - hosts: package_cache
9 | roles:
10 | - package_cache
11 |
12 | - hosts:
13 | - all
14 | roles:
15 | - common
16 |
17 | - hosts:
18 | - lbssl
19 | roles:
20 | - lbssl
21 |
22 | - hosts:
23 | - proxy
24 | - storage
25 | - package_cache
26 | roles:
27 | - swift_common
28 |
29 | - hosts:
30 | - swiftclient
31 | roles:
32 | - swiftclient
33 |
34 | - hosts:
35 | - authentication
36 | roles:
37 | - authentication
38 |
39 | - hosts:
40 | - proxy
41 | roles:
42 | - proxy
43 |
44 | #
45 | # I would have expected serial to work here, and
46 | # to be the right way to do it, but when I tried it
47 | # I received "too many open files" errors.
48 | #
49 |
50 | - hosts:
51 | - storage
52 | serial: 1
53 | roles:
54 | - storage
55 |
56 | #
57 | # These are not so much roles as tasks that start services,
58 | # though they also include distributing the ring files.
59 | #
60 |
61 | - hosts:
62 | - proxy
63 | # vars_file:
64 | # - include: vars/{{ansible_os_family}}.yml
65 | tasks:
66 | - include: start_proxy.yml
67 |
68 | - hosts:
69 | - storage
70 | # vars_file:
71 | # - include: vars/{{ansible_os_family}}.yml
72 | tasks:
73 | - include: start_storage.yml
74 |
75 | #
76 | # Probably best to run these on their own?
77 | #
78 |
79 | #- include: tests/tests.yml
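80 |
81 | #
82 | # Typical invocation, assuming the inventory in ./hosts
83 | # (see docs/ansible-command-examples.txt for more):
84 | #
85 | #   ansible-playbook -i hosts site.yml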
--------------------------------------------------------------------------------
/start_proxy.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Create files necessary to start the proxy and also start it
5 | #
6 |
7 | - name: rebalance rings
8 | command: swift-ring-builder {{ item }}.builder rebalance
9 | creates=/etc/swift/{{ item }}.ring.gz
10 | chdir=/etc/swift
11 | register: rebalance
12 | with_items:
13 | - account
14 | - object
15 | - container
16 |
17 | # This will fetch from all proxies, if there is more than one, and put the ring files in the same spot, which is OK but not perfect
18 | - name: grab resulting *.ring.gz files and put them on all proxy and storage nodes
19 | fetch: dest=fetch/{{ item }}.ring.gz flat=yes src=/etc/swift/{{ item }}.ring.gz
20 | when: rebalance.changed
21 | with_items:
22 | - account
23 | - object
24 | - container
25 |
26 | - name: start swift-proxy on proxy nodes
27 | service: name=swift-proxy state=running
28 | when: ansible_os_family == 'Debian'
29 |
30 | - name: start swift-proxy on proxy nodes
31 | service: name=openstack-swift-proxy state=running
32 | when: ansible_os_family == 'RedHat'
33 |
34 | - name: start object-expirer on proxy nodes
35 | command: swift-init object-expirer start
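36 |
37 | # Note: with flat=yes the fetched *.ring.gz files end up in ./fetch/ on the
38 | # Ansible control host; start_storage.yml copies them from there to the
39 | # storage nodes.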
--------------------------------------------------------------------------------
/start_storage.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # XXX FIX ME - duplicate code XXX
4 | - name: put the *.ring.gz files on all storage servers
5 | copy: src=fetch/{{ item }}.ring.gz dest=/etc/swift/{{item}}.ring.gz owner=swift group=swift mode=0640
6 | register: new_rings
7 | with_items:
8 | - account
9 | - object
10 | - container
11 |
12 | - name: make sure default configuration files are *not* there
13 | file: state=absent path=/etc/swift/{{ item }}-server.conf
14 | with_items:
15 | - account
16 | - object
17 | - container
18 |
19 | - name: restart swift-storage node services
20 | command: swift-init all stop
21 | when: new_rings.changed
22 | ignore_errors: True
23 |
24 | - name: restart swift-storage node services
25 | command: swift-init all start
26 | when: new_rings.changed
27 |
28 | # XXX FIX ME XXX
29 | - name: restart syslog
30 | service: name=rsyslog state=restarted
31 | when: new_rings.changed
32 |
--------------------------------------------------------------------------------
/tests/gauntlt.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Run security tests using gauntlt
5 | #
6 |
7 | # nokogiri requires 1.9.2+, darn
8 | - name: install ruby 1.9.3
9 | apt: name=ruby1.9.3 state=installed
10 |
11 | - name: setup alternative ruby1.9.3 as ruby
12 | file: src=/usr/bin/ruby1.9.3 dest=/etc/alternatives/ruby state=link
13 |
14 | - name: setup alternative gem1.9.3 as gem
15 | file: src=/usr/bin/gem1.9.3 dest=/etc/alternatives/gem state=link
16 |
17 | - name: ensure packages gauntlt requires are installed
18 | apt: name={{ item }} state=installed
19 | with_items:
20 | - nmap
21 | #- rubygems
22 | - libxml2
23 | - libxslt1-dev
24 | - libxml2-dev
25 |
26 | - name: ensure gauntlt is installed
27 | gem: name={{ item }} state=present
28 | with_items:
29 | - gauntlt
30 | - json
31 |
32 | - name: create a /root/gauntlt directory
33 | file: state=directory path=/root/gauntlt
34 |
35 | - name: copy over gauntlt attack files
36 | template: src=../attacks/ssl_proxy.attack.j2 dest=/root/gauntlt/ssl_proxy.attack
37 |
38 | - name: run gauntlt
39 | command: gauntlt
40 | chdir=/root
41 |
42 | - name: cleanup attack files
43 | command: rm -rf /root/gauntlt
--------------------------------------------------------------------------------
/tests/swiftclient.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # SSL Testing
5 | #
6 |
7 | # If cert is ok, returns "ok", otherwise is not ok, ok?
8 | #
9 | # client$ echo QUIT | openssl s_client -showcerts -connect google.com:443 -showcerts -debug 2>&1 | grep "Verify return code"
10 | # Verify return code: 0 (ok)
11 | # client$ echo QUIT | openssl s_client -showcerts -connect 192.168.100.50:35357 -showcerts -debug 2>&1 | grep "Verify return code"
12 | # Verify return code: 21 (unable to verify the first certificate)
13 |
14 | - name: verify ssl cert
15 | shell: echo QUIT | openssl s_client -showcerts -connect 192.168.100.50:35357 2>&1 | grep "Verify return code"
16 | register: openssl_s_client
17 |
18 | - name: test for openssl client failed
19 | debug: msg="ssl client failed"
20 | when: openssl_s_client.stdout.find("ok") == -1
21 |
22 | #
23 | # [root@swift-keystone-01 ~]# curl --cacert /etc/keystone/ssl/certs/ca.pem https://192.168.100.50:35357
24 | # curl: (51) SSL: certificate subject name 'localhost' does not match target host name '192.168.100.50'
25 | #
26 |
27 |
28 | #
29 | #
30 | #
31 |
32 | - name: check if adminrc exists
33 | stat: path=/home/vagrant/adminrc
34 |
35 | - name: run keystone user-list using adminrc file
36 | shell: chdir=/home/vagrant executable="/bin/bash" source ./adminrc; keystone --insecure user-list
37 |
38 | #
39 | # Run some tests using swift client
40 | #
41 |
42 | - name: check if testrc exists
43 | stat: path=/home/vagrant/testrc
44 |
45 | - name: run swift stat using testrc file
46 | shell: chdir=/home/vagrant executable="/bin/bash" source ./testrc; swift stat
47 |
48 | - name: run swift list using testrc file
49 | shell: chdir=/home/vagrant executable="/bin/bash" source ./testrc; swift list
50 |
51 | - name: create a file to upload
52 | shell: chdir=/home/vagrant executable="/bin/bash" echo $RANDOM > swift.txt
53 |
54 | - name: get md5sum of uploaded file
55 | command: chdir=/home/vagrant md5sum swift.txt
56 | register: md5sum_upload
57 |
58 | - name: upload a file into swift
59 | shell: chdir=/home/vagrant executable="/bin/bash" source ./testrc; swift upload swifty swift.txt
60 | - name: remove swift.txt
61 | command: chdir=/home/vagrant rm -f ./swift.txt
62 |
63 | - name: download file again
64 | shell: chdir=/home/vagrant executable="/bin/bash" source ./testrc; swift download swifty swift.txt
65 |
66 | - name: get md5sum of downloaded file
67 | command: chdir=/home/vagrant md5sum swift.txt
68 | register: md5sum_download
69 |
70 | - fail: msg="md5sum for downloaded swift.txt file does not match uploaded md5sum"
71 | when: md5sum_download.stdout != md5sum_upload.stdout
72 |
73 | # NOTE: This will delete all containers, files!
74 | - name: delete all from swifty container
75 | shell: chdir=/home/vagrant executable="/bin/bash" source ./testrc; swift delete --all
76 |
77 | - name: remove swift.txt
78 | command: chdir=/home/vagrant rm -f ./swift.txt
--------------------------------------------------------------------------------
/tests/tests.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | #
4 | # Run some tests to verify installation
5 | #
6 |
7 | - hosts: swiftclient
8 | tasks:
9 | - include: swiftclient.yml
10 |
11 | #- hosts: gauntlt
12 | # tasks:
13 | # - include: gauntlt.yml
--------------------------------------------------------------------------------