├── roles
│   ├── ha
│   │   ├── templates
│   │   │   ├── authkeys
│   │   │   ├── haresources
│   │   │   ├── ha.cf
│   │   │   ├── floatip
│   │   │   └── assign-ip
│   │   ├── defaults
│   │   │   └── main.yml
│   │   └── tasks
│   │       └── main.yml
│   ├── gluster-minio
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── filter_plugins
│   │   │   └── to_group_vars.py
│   │   └── tasks
│   │       └── main.yml
│   └── minio
│       ├── handlers
│       │   └── main.yml
│       ├── templates
│       │   ├── caddy-conf
│       │   ├── update-motd.sh
│       │   ├── caddy-minio-upstart.conf
│       │   └── minio-upstart.conf
│       ├── defaults
│       │   └── main.yml
│       └── tasks
│           └── main.yml
├── gen_auth_key
├── .gitignore
├── site.yml
├── terraform.tfvars.sample
├── conf
│   └── cloud-config.yaml
├── provider.tf
├── group_vars
│   └── node.sample
├── ansible.cfg
├── resources.tf
└── README.md

/roles/ha/templates/authkeys:
--------------------------------------------------------------------------------
auth1
1 sha1 {{ ha_auth_key }}

--------------------------------------------------------------------------------
/roles/ha/templates/haresources:
--------------------------------------------------------------------------------
{{ hostvars[groups['node'][0]].name }} floatip

--------------------------------------------------------------------------------
/gen_auth_key:
--------------------------------------------------------------------------------
dd if='/dev/urandom' bs=512 count=1 2>'/dev/null' \
  | openssl sha1
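For reference, running the helper looks like this; depending on your OpenSSL build the digest may carry a `(stdin)= ` prefix, and only the hex digest itself belongs in `ha_auth_key`:

```sh
# Hash 512 bytes of /dev/urandom; copy the hex digest into ha_auth_key
# in group_vars/node (drop any "(stdin)= " prefix).
bash gen_auth_key
```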
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
site.retry
*.tfstate.*
*.tfstate
*.tfvars
group_vars/node
*.pyc

--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
---
- hosts: node
  roles:
    - gluster-minio
    - minio
    - ha
  become: True

--------------------------------------------------------------------------------
/terraform.tfvars.sample:
--------------------------------------------------------------------------------
do_token=""
private_key_path=""
ssh_fingerprint=""
region=""
project="app1"
public_key=""

--------------------------------------------------------------------------------
/roles/ha/templates/ha.cf:
--------------------------------------------------------------------------------
node {{ primary_name }}
ucast eth0 {{ primary_address }}
node {{ secondary_name }}
ucast eth0 {{ secondary_address }}

--------------------------------------------------------------------------------
/roles/gluster-minio/defaults/main.yml:
--------------------------------------------------------------------------------
---
cluster_member_ips: "{{ hostvars | to_group_vars(groups, 'node') | map(attribute='ipv4_address_private') | join(',') }}"

--------------------------------------------------------------------------------
/roles/minio/handlers/main.yml:
--------------------------------------------------------------------------------
- name: restart minio
  service: name=minio state=restarted

- name: restart caddy-minio
  service: name=caddy-minio state=restarted

--------------------------------------------------------------------------------
/conf/cloud-config.yaml:
--------------------------------------------------------------------------------
#cloud-config
users:
  - name: ubuntu
    shell: /bin/bash
    sudo: ['ALL=(ALL) NOPASSWD:ALL']
    ssh-authorized-keys:
      - ${public_key}

--------------------------------------------------------------------------------
/provider.tf:
--------------------------------------------------------------------------------
variable "do_token" {}
variable "private_key_path" {}
variable "ssh_fingerprint" {}
variable "region" {}

provider "digitalocean" {
  token = "${var.do_token}"
}

--------------------------------------------------------------------------------
/roles/minio/templates/caddy-conf:
--------------------------------------------------------------------------------
{{ minio_host }} {
  proxy / 127.0.0.1:9000 {
    proxy_header Host {host}
    proxy_header X-Real-IP {remote}
    proxy_header X-Forwarded-Proto {scheme}
  }
}

--------------------------------------------------------------------------------
/group_vars/node.sample:
--------------------------------------------------------------------------------
# Floating IP issued by DigitalOcean
# floating_ip:

# DigitalOcean access token
# do_token:

# Generated ha auth key. Consult README.md for how to generate.
# ha_auth_key:

# Floating IP hostname
# minio_host:

--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
ansible_managed = Please do not change this file directly since it is managed by Ansible and will be overwritten
remote_user = ubuntu
host_key_checking = False

[ssh_connection]
pipelining = True
ssh_args = -o ControlMaster=auto -o ControlPersist=60s
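With this `ansible.cfg` in place, a quick sanity check before running the playbook (assuming terraform-inventory was installed to `/usr/local/bin`, as Homebrew does, and that you run it from the repository root so it can find `terraform.tfstate`):

```sh
# Show the inventory Terraform produced, then ping the "node" group.
/usr/local/bin/terraform-inventory --list
ansible -i /usr/local/bin/terraform-inventory node -m ping
```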
--------------------------------------------------------------------------------
/roles/ha/defaults/main.yml:
--------------------------------------------------------------------------------
---
primary_node: "{{ hostvars[groups['node'][0]] }}"
primary_name: "{{ primary_node.name }}"
primary_address: "{{ primary_node.ansible_default_ipv4.address }}"
secondary_node: "{{ hostvars[groups['node'][1]] }}"
secondary_name: "{{ secondary_node.name }}"
secondary_address: "{{ secondary_node.ansible_default_ipv4.address }}"

--------------------------------------------------------------------------------
/roles/gluster-minio/filter_plugins/to_group_vars.py:
--------------------------------------------------------------------------------
def to_group_vars(host_vars, groups, target='all'):
    """Return the hostvars dict of every host in the target group."""
    data = []
    for host in groups[target]:
        data.append(host_vars[host])
    return data


class FilterModule(object):
    def filters(self):
        return {"to_group_vars": to_group_vars}

--------------------------------------------------------------------------------
/roles/minio/defaults/main.yml:
--------------------------------------------------------------------------------
---
caddy_url: https://caddyserver.com/download/build?os=linux&arch=amd64&features=
minio_url: https://dl.minio.io/server/minio/release/linux-amd64/minio

minio_bin: /usr/local/bin/minio
minio_user: minio
minio_group: minio

minio_storage_dir: /mnt/minio1/storage
minio_config_dir: /mnt/minio1/config

--------------------------------------------------------------------------------
/roles/minio/templates/update-motd.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Publish the Minio credentials from the shared config volume to the MOTD.
if [[ -f /mnt/minio1/config/config.json ]]; then
  cat /mnt/minio1/config/config.json \
    | jq '.credentials.accessKeyId + "," + .credentials.secretAccessKey' \
    | sed s/\"//g \
    | awk '{split($0,creds,","); print "\n\n\tMinio Credentials\n\n\tAccess Key ID: " creds[1] "\n\tSecret Access Key: " creds[2] "\n\n"}' > /etc/motd
fi

--------------------------------------------------------------------------------
/roles/minio/templates/caddy-minio-upstart.conf:
--------------------------------------------------------------------------------
# /etc/init/caddy-minio.conf

description "caddy-minio"
author "Bryan Liles"

start on (local-filesystems and net-device-up IFACE=eth0)
stop on shutdown

# Automatically Respawn:
respawn
respawn limit 99 5

script
  USER="{{ minio_user }}"
  BIN="/opt/caddy/caddy"
  OPTS="-conf=/etc/caddy-minio.conf"
  exec sudo -u $USER $BIN $OPTS
end script

--------------------------------------------------------------------------------
/roles/ha/templates/floatip:
--------------------------------------------------------------------------------
#!/bin/bash

param=$1

export DO_TOKEN='{{ do_token }}'
IP='{{ floating_ip }}'
ID=$(curl -s http://169.254.169.254/metadata/v1/id)

if [ "start" == "$param" ]; then
  python /usr/local/bin/assign-ip $IP $ID
  service minio start
  exit 0
elif [ "stop" == "$param" ]; then
  service minio stop
  exit 0
elif [ "status" == "$param" ]; then
  exit 0
else
  echo "no such command $param"
  exit 1
fi
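Once heartbeat is up on both nodes, failover can be exercised by hand. A hypothetical smoke test (the droplet address and hostname below are placeholders):

```sh
# Stop heartbeat on the current primary; the secondary should claim the
# Floating IP via assign-ip and start minio within a few seconds.
ssh ubuntu@<primary-droplet-ip> 'sudo service heartbeat stop'
sleep 15
curl -sI https://<minio_host> | head -n 1   # expect an HTTP response from the secondary
```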
--------------------------------------------------------------------------------
/roles/minio/templates/minio-upstart.conf:
--------------------------------------------------------------------------------
# /etc/init/minio.conf

description "minio"
author "Bryan Liles"

start on (started caddy-minio)
stop on shutdown

# Automatically Respawn:
respawn
respawn limit 99 5

script
  USER="{{ minio_user }}"
  BIN="{{ minio_bin }}"
  ADDR_OPTS="--address 127.0.0.1:9000 -C {{ minio_config_dir }}"
  OPTS="--min-free-disk 5% {{ minio_storage_dir }}"
  mkdir -p {{ minio_storage_dir }}
  mkdir -p {{ minio_config_dir }}
  chown $USER {{ minio_storage_dir }}
  chown $USER {{ minio_config_dir }}
  exec sudo -u $USER $BIN $ADDR_OPTS server $OPTS
end script

--------------------------------------------------------------------------------
/roles/ha/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: install heartbeat components
  apt: name="{{ item }}" state=present update_cache=yes cache_valid_time=3600
  with_items:
    - heartbeat

- name: configure heartbeat
  template: src=ha.cf dest=/etc/ha.d/ha.cf mode=0644

- name: configure authkeys
  template: src=authkeys dest=/etc/ha.d/authkeys mode=0600

- name: configure haresources
  template: src=haresources dest=/etc/ha.d/haresources

- name: install floating ip assigner
  template: src=assign-ip dest=/usr/local/bin/assign-ip mode=0755

- name: install floating ip manager
  template: src=floatip dest=/etc/init.d/floatip mode=0755

- name: start heartbeat service
  service: name=heartbeat state=started enabled=yes

--------------------------------------------------------------------------------
/roles/gluster-minio/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: build hosts file
  lineinfile: dest=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].ipv4_address_private }} {{ hostvars[item].name }}" state=present
  when: hostvars[item].ipv4_address_private is defined
  with_items: "{{ groups['node'] }}"

- name: configure glusterfs ppa
  apt_repository: repo='ppa:gluster/glusterfs-3.5'

- name: install glusterfs components
  apt: name="{{ item }}" state=present update_cache=yes cache_valid_time=3600
  with_items:
    - glusterfs-server
    - glusterfs-client

- name: configure storage volume
  gluster_volume: state=present name=minio1 replicas=2 bricks=/gluster-storage force=true cluster="{{ cluster_member_ips }}"
  run_once: true

- name: start gluster volume
  gluster_volume: state=started name=minio1

# Each node mounts the volume via its own hostname so the mount does not
# depend on the peer being reachable at mount time.
- name: mount gluster volume
  mount: name=/mnt/minio1 src="{{ name }}:/minio1" fstype=glusterfs state=mounted passno=2

--------------------------------------------------------------------------------
/resources.tf:
--------------------------------------------------------------------------------
variable "public_key" {}

variable "project" {}

variable "image" {
  default = "ubuntu-14-04-x64"
}

variable "size" {
  default = "4gb"
}

resource "digitalocean_droplet" "node" {
  count              = "2"
  image              = "${var.image}"
  name               = "${var.project}-minio-ha-${count.index+1}"
  region             = "${var.region}"
  size               = "${var.size}"
  private_networking = true
  ssh_keys = [
    "${var.ssh_fingerprint}"
  ]
  user_data = "${template_file.user_data.rendered}"

  connection {
    user     = "root"
    type     = "ssh"
    key_file = "${var.private_key_path}"
    timeout  = "2m"
  }

  provisioner "remote-exec" {
    inline = [ "# droplet up" ]
  }
}

resource "template_file" "user_data" {
  template = "${file("${path.module}/conf/cloud-config.yaml")}"

  vars {
    public_key = "${var.public_key}"
  }
}

resource "digitalocean_floating_ip" "fip" {
  region     = "${var.region}"
  droplet_id = "${digitalocean_droplet.node.0.id}"
}
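The provisioning flow these files support is the standard one; Terraform reads `terraform.tfvars` automatically, and `terraform show` is one way to read the resulting addresses back out of the state file:

```sh
# Preview, create, then inspect the two droplets and the Floating IP.
terraform plan
terraform apply
terraform show | grep -E 'name|address'
```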
--------------------------------------------------------------------------------
/roles/ha/templates/assign-ip:
--------------------------------------------------------------------------------
#!/usr/bin/python

import os
import sys
import requests
import json

api_base = 'https://api.digitalocean.com/v2'


def usage():
    print('{0} [Floating IP] [Droplet ID]'.format(sys.argv[0]))
    print('\nYour DigitalOcean API token must be in the "DO_TOKEN"'
          ' environment variable.')


def main(floating_ip, droplet_id):
    payload = {'type': 'assign', 'droplet_id': droplet_id}
    headers = {'Authorization': 'Bearer {0}'.format(os.environ['DO_TOKEN']),
               'Content-type': 'application/json'}
    url = api_base + "/floating_ips/{0}/actions".format(floating_ip)
    r = requests.post(url, headers=headers, data=json.dumps(payload))

    resp = r.json()
    if 'message' in resp:
        print('{0}: {1}'.format(resp['id'], resp['message']))
        sys.exit(1)
    else:
        print('Moving IP address: {0}'.format(resp['action']['status']))

if __name__ == "__main__":
    if 'DO_TOKEN' not in os.environ or len(sys.argv) < 3:
        usage()
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
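Heartbeat normally drives this script through the `floatip` init script above, but it can also be invoked by hand; the token, IP, and Droplet ID below are placeholders:

```sh
# Reassign the Floating IP to a specific droplet.
export DO_TOKEN='your-digitalocean-token'
/usr/local/bin/assign-ip 203.0.113.10 12345678
```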
--------------------------------------------------------------------------------
/roles/minio/tasks/main.yml:
--------------------------------------------------------------------------------
- name: create minio group
  group: name="{{ minio_group }}" state=present

- name: create minio user
  user: name="{{ minio_user }}" shell=/bin/bash group="{{ minio_group }}"

- name: install minio components
  apt: name="{{ item }}" state=present update_cache=yes cache_valid_time=3600
  with_items:
    - jq

- name: download minio
  get_url: url="{{ minio_url }}" dest="{{ minio_bin }}" mode=0755

- name: create minio upstart
  template: src=minio-upstart.conf dest=/etc/init/minio.conf

- name: create caddy directory
  file: path=/opt/caddy state=directory

- name: download caddy
  get_url: url="{{ caddy_url }}" dest=/tmp/caddy.tar.gz

- name: extract caddy
  unarchive: copy=no src=/tmp/caddy.tar.gz dest=/opt/caddy

- name: allow caddy to bind to ports below 1024
  command: setcap cap_net_bind_service=+ep /opt/caddy/caddy

- name: create caddy upstart
  template: src=caddy-minio-upstart.conf dest=/etc/init/caddy-minio.conf

- name: create caddy config
  template: src=caddy-conf dest=/etc/caddy-minio.conf

- name: start caddy
  service: name=caddy-minio state=started enabled=yes

- name: create motd update script
  template: src=update-motd.sh dest=/usr/local/bin/update-motd.sh mode=0755

- name: set up motd cron
  cron: name="update motd" job="/usr/local/bin/update-motd.sh > /dev/null"

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# HA Minio

Demonstration of how to configure [Minio](https://minio.io/), an S3-compatible
cloud storage server, on DigitalOcean in a highly available fashion.

This installation requires Terraform and Ansible.

# Prerequisites

## Terraform

Install [Terraform](https://www.terraform.io/downloads.html) on your system. If you are using
macOS and Homebrew, you can install it with `brew install terraform`.

## Ansible

Install [Ansible](http://docs.ansible.com/ansible/intro_installation.html) on your system. If
you are using macOS and Homebrew, you can install it with `brew install ansible`.

Since inventory is managed by Terraform, you can use [terraform-inventory](https://github.com/adammck/terraform-inventory) to supply
Ansible with the Droplet configuration. If you are using macOS and Homebrew, you can
install it with `brew install terraform-inventory`.

# Install Minio on Droplets

## Provision Droplets

1. Terraform is configured through `terraform.tfvars`. A sample file, `terraform.tfvars.sample`,
   has been included; copy it to `terraform.tfvars` and fill in the values.
1. Use Terraform to build the Droplets and Floating IP: `terraform apply`.
1. Once the Droplets and Floating IP have been created, retrieve the assigned Floating IP.
1. Assign a hostname to the Floating IP that was created. This hostname will be used to
   automatically generate a TLS certificate when configuring the Droplets.

## Configure Droplets

1. Copy `group_vars/node.sample` to `group_vars/node`.
1. Create the Ansible node configuration:
   1. Un-comment and fill in `floating_ip`.
   1. Un-comment and fill in `do_token` with your DigitalOcean access token.
   1. Un-comment and fill in `minio_host` with the hostname you assigned to your Floating IP.
   1. Use `gen_auth_key` to generate an auth key for the cluster. Un-comment and
      fill in `ha_auth_key` with the generated key.
1. Use Ansible to configure the Droplets for Minio: `ansible-playbook -i /usr/local/bin/terraform-inventory site.yml`

## Using Minio

Once Ansible has completed running, your cloud storage site will be available at
`https://<minio_host>`, the hostname you assigned to the Floating IP.

## Extras

Generating a fingerprint for your public ssh key:

```sh
ssh-keygen -E md5 -lf ~/.ssh/id_rsa.pub
```
--------------------------------------------------------------------------------
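As a final check once the playbook has run, the site can be probed from your workstation; `<minio_host>` is the hostname you assigned to the Floating IP:

```sh
# Caddy should answer on the Floating IP hostname with a valid TLS certificate.
curl -I https://<minio_host>
```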