├── roles ├── package │ ├── action_plugins │ │ ├── __init__.py │ │ ├── common.py │ │ ├── dcos_iam_group.py │ │ ├── dcos_iam_user.py │ │ ├── dcos_secret.py │ │ ├── dcos_marathon.py │ │ ├── dcos_connection.py │ │ ├── dcos_package.py │ │ ├── dcos_edgelb.py │ │ └── dcos_iam_serviceaccount.py │ ├── kubernetes │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── main.yml │ │ │ ├── uninstall.yml │ │ │ ├── kubectl.yml │ │ │ ├── kubernetes-proxy.yml │ │ │ └── kubernetes.yml │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── docker │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── override.conf.j2 │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── node-install │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── node-upgrade │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ ├── master.yml │ │ └── agent.yml ├── common │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml └── bootstrap │ ├── templates │ ├── ip-detect-public-azure.j2 │ ├── ip-detect-aws.j2 │ ├── ip-detect-onprem.j2 │ ├── ip-detect-public-aws.j2 │ ├── ip-detect-gcp.j2 │ ├── rexray.yaml.j2 │ ├── ip-detect-public-gcp.j2 │ ├── ip-detect-azure.j2 │ └── config.yaml.j2 │ ├── tasks │ ├── init.yml │ ├── upgrade.yml │ ├── main.yml │ └── pre-check.yml │ ├── defaults │ └── main.yml │ └── files │ └── fault-domain-detect ├── ansible.cfg ├── scripts ├── kubeapi-proxy-gcp.sh ├── kubeapi-proxy-azure.sh └── kubeapi-proxy-aws.sh ├── resources ├── main-k8s-api.tf.aws ├── desired_cluster_profile.aws ├── desired_cluster_profile.azure ├── main-k8s-api.tf.gcp ├── main-k8s-api.tf.azure ├── desired_cluster_profile.gcp ├── override.aws.tf ├── override.azure.tf └── override.gcp.tf ├── plays ├── packages.yml ├── kubernetes.yml ├── install.yml ├── upgrade.yml ├── uninstall.yml └── access-onprem.yml ├── .gitignore ├── docs ├── ACCESS_ONPREM.md ├── UPGRADE_DCOS.md ├── DCOS_AGENTS.md ├── INSTALL_ONPREM.md ├── INSTALL_AZURE.md ├── 
INSTALL_GCP.md ├── INSTALL_AWS.md └── INSTALL_KUBERNETES.md ├── hosts.example.yaml ├── group_vars └── all.example ├── README.md ├── CHANGELOG.md ├── Makefile ├── inventory.py └── LICENSE /roles/package/action_plugins/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_version: 17.12.1.ce 3 | -------------------------------------------------------------------------------- /roles/package/kubernetes/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: package 4 | -------------------------------------------------------------------------------- /roles/package/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dcos_cli_enabled: true 3 | dcos_cli_upgrade: false 4 | -------------------------------------------------------------------------------- /roles/node-install/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dcos_path_tmp: /tmp/dcos 3 | dcos_port_webserver: 8080 4 | -------------------------------------------------------------------------------- /roles/node-upgrade/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dcos_path_tmp: /tmp/dcos 3 | dcos_port_webserver: 8080 4 | -------------------------------------------------------------------------------- /roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Install latest operating system updates 3 | os_system_updates: False 4 | -------------------------------------------------------------------------------- /ansible.cfg: 
-------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = hosts.yaml 3 | host_key_checking = False 4 | remote_user = centos 5 | roles_path = ./roles 6 | -------------------------------------------------------------------------------- /roles/docker/templates/override.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | ExecStart= 3 | ExecStart=/usr/bin/dockerd --storage-driver=overlay --log-driver=none 4 | -------------------------------------------------------------------------------- /roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | systemd: 4 | name: docker 5 | state: restarted 6 | daemon_reload: yes 7 | -------------------------------------------------------------------------------- /scripts/kubeapi-proxy-gcp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add access for kubeapi via 6443 port 4 | 5 | # Update main.tf file 6 | cat ../resources/main-k8s-api.tf.gcp >> ../.deploy/main.tf 7 | -------------------------------------------------------------------------------- /scripts/kubeapi-proxy-azure.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add access for kubeapi via 6443 port 4 | 5 | # Update main.tf file 6 | cat ../resources/main-k8s-api.tf.azure >> ../.deploy/main.tf 7 | -------------------------------------------------------------------------------- /resources/main-k8s-api.tf.aws: -------------------------------------------------------------------------------- 1 | 2 | # https on port 6443 access from anywhere 3 | ingress { 4 | from_port = 6443 5 | to_port = 6443 6 | protocol = "tcp" 7 | cidr_blocks = ["${var.admin_cidr}"] 8 | } 9 | -------------------------------------------------------------------------------- 
/roles/package/kubernetes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dcos_deploy_ee_package: false 3 | dcos_k8s_enabled: true 4 | 5 | dcos_k8s_app_id: "kubernetes" 6 | dcos_k8s_proxy_app_id: "{{ dcos_k8s_app_id }}-proxy" 7 | dcos_k8s_package_version: '1.2.1-1.10.6' 8 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-public-azure.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -o nounset -o errexit 3 | curl -H Metadata:true -fsSL "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-04-02&format=text" 4 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-aws.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Example ip-detect script using an external authority 3 | # Uses the AWS Metadata Service to get the node's internal 4 | # ipv4 address 5 | curl -fsSL http://169.254.169.254/latest/meta-data/local-ipv4 6 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-onprem.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o nounset -o errexit 3 | export PATH=/usr/sbin:/usr/bin:$PATH 4 | echo $(ip addr show {{ dcos_ip_detect_interface }} | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | head -1) 5 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-public-aws.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Example ip-detect script using an external authority 3 | # Uses the AWS Metadata Service to get the node's internal 4 | # ipv4 
address 5 | curl -fsSL http://169.254.169.254/latest/meta-data/public-ipv4 6 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-gcp.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Example ip-detect script using an external authority 3 | # Uses the GCP Metadata Service to get the node's internal 4 | # ipv4 address 5 | curl -fsSL http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip -H 'Metadata-Flavor: Google' 6 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/rexray.yaml.j2: -------------------------------------------------------------------------------- 1 | rexray_config: 2 | rexray: 3 | loglevel: info 4 | service: ebs 5 | libstorage: 6 | integration: 7 | volume: 8 | operations: 9 | unmount: 10 | ignoreusedcount: true 11 | server: 12 | tasks: 13 | logTimeout: 5m 14 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-public-gcp.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Example ip-detect script using an external authority 3 | # Uses the GCP Metadata Service to get the node's internal 4 | # ipv4 address 5 | curl -fsSL http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip -H 'Metadata-Flavor: Google' 6 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/ip-detect-azure.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Example ip-detect script using an external authority 3 | # Uses the AWS Metadata Service to get the node's internal 4 | # ipv4 address 5 | curl -H Metadata:true -fsSL 
"http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-04-02&format=text" 6 | -------------------------------------------------------------------------------- /scripts/kubeapi-proxy-aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Add access for kubeapi via 6443 port 4 | 5 | # 6 | unamestr=`uname` 7 | if [[ "$unamestr" == "Linux" ]] 8 | then 9 | SED='sed -i' 10 | else 11 | SED='sed -i ""' 12 | fi 13 | 14 | # Update main.tf file 15 | ${SED} -i "" '/http-https-security-group/!{p;d;};n;n;r ../resources/main-k8s-api.tf.aws' ../.deploy/main.tf 16 | -------------------------------------------------------------------------------- /plays/packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install packages 3 | hosts: localhost 4 | serial: 1 5 | become: false 6 | environment: 7 | PATH: '{{ ansible_env.PATH }}:{{ ansible_env.PWD }}/..' 8 | 9 | roles: 10 | - package 11 | 12 | tasks: 13 | - name: ensure spark is installed 14 | dcos_package: 15 | name: spark 16 | state: present 17 | version: 2.0.1-2.2.0-1 18 | -------------------------------------------------------------------------------- /plays/kubernetes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install kubernetes as a service 3 | hosts: localhost 4 | serial: 1 5 | become: false 6 | environment: 7 | PATH: '{{ ansible_env.PATH }}:{{ ansible_env.PWD }}/..' 
8 | 9 | roles: 10 | - role: package/kubernetes 11 | vars: 12 | dcos_k8s_enabled: true 13 | dcos_k8s_app_id: 'kubernetes' 14 | dcos_k8s_package_version: '1.2.1-1.10.6' 15 | -------------------------------------------------------------------------------- /plays/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: setup the system requirements on all nodes 3 | hosts: all 4 | serial: 20 5 | become: true 6 | roles: 7 | - common 8 | - docker 9 | 10 | - name: generate the DC/OS configuration 11 | hosts: bootstraps 12 | serial: 1 13 | become: true 14 | roles: 15 | - bootstrap 16 | 17 | - name: deploy nodes 18 | hosts: [ masters, agents, agent_publics] 19 | serial: 20 20 | become: true 21 | roles: 22 | - node-install 23 | -------------------------------------------------------------------------------- /resources/desired_cluster_profile.aws: -------------------------------------------------------------------------------- 1 | os = "centos_7.4" 2 | state = "none" 3 | # 4 | num_of_masters = "1" 5 | num_of_private_agents = "3" 6 | num_of_public_agents = "1" 7 | # 8 | aws_region = "us-west-2" 9 | aws_bootstrap_instance_type = "m4.large" 10 | aws_master_instance_type = "m4.2xlarge" 11 | aws_agent_instance_type = "m4.2xlarge" 12 | aws_public_agent_instance_type = "m4.2xlarge" 13 | ssh_key_name = "default" 14 | # 15 | # Inbound Master Access 16 | admin_cidr = "0.0.0.0/0" 17 | -------------------------------------------------------------------------------- /roles/package/kubernetes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook manages Kubernetes on DC/OS as a service 3 | 4 | - name: install kubernetes 5 | include_tasks: kubernetes.yml 6 | when: dcos_k8s_enabled 7 | 8 | - name: install kubernetes proxy 9 | include_tasks: kubernetes-proxy.yml 10 | when: dcos_k8s_enabled 11 | 12 | - name: connect kubectl 13 | include_tasks: kubectl.yml 14 | when: 
dcos_k8s_enabled 15 | 16 | - name: uninstall kubernetes 17 | include_tasks: uninstall.yml 18 | when: not dcos_k8s_enabled 19 | -------------------------------------------------------------------------------- /resources/desired_cluster_profile.azure: -------------------------------------------------------------------------------- 1 | os = "centos_7.3" 2 | state = "none" 3 | # 4 | num_of_masters = "1" 5 | num_of_private_agents = "3" 6 | num_of_public_agents = "1" 7 | # 8 | azure_region = "East US 2" 9 | azure_bootstrap_instance_type = "Standard_DS1_v2" 10 | azure_master_instance_type = "Standard_D4_v2" 11 | azure_agent_instance_type = "Standard_D4_v2" 12 | azure_public_agent_instance_type = "Standard_D4_v2" 13 | # 14 | # Inbound Master Access 15 | admin_cidr = "0.0.0.0/0" 16 | 17 | ssh_pub_key = "INSERT_AZURE_PUBLIC_KEY_HERE" -------------------------------------------------------------------------------- /resources/main-k8s-api.tf.gcp: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Allow access to Kubernetes API from the outside world specified by the user source range 4 | resource "google_compute_firewall" "kubeapi-proxy" { 5 | name = "${data.template_file.cluster-name.rendered}-kubeapi-proxy" 6 | network = "${google_compute_network.default.name}" 7 | allow { 8 | protocol = "tcp" 9 | ports = ["6443"] 10 | } 11 | 12 | source_ranges = ["${var.admin_cidr}"] 13 | description = "Used to allow access to Kubernetes API from the outside world specified by the user source range." 
14 | } 15 | -------------------------------------------------------------------------------- /roles/package/kubernetes/tasks/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook uninstalls kubernetes on DC/OS as a service 3 | 4 | - name: uninstall kubernetes 5 | dcos_package: 6 | name: kubernetes 7 | app_id: "{{ dcos_k8s_app_id }}" 8 | version: "{{ dcos_k8s_package_version }}" 9 | state: absent 10 | 11 | - name: uninstall kubernetes-proxy 12 | dcos_marathon: 13 | app_id: "{{ dcos_k8s_proxy_app_id }}" 14 | state: absent 15 | ignore_errors: true 16 | 17 | - name: delete the kubectl executable 18 | become: false 19 | file: 20 | path: '../kubectl' 21 | state: absent 22 | -------------------------------------------------------------------------------- /roles/node-upgrade/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook upgrades all nodes 3 | 4 | - name: create temporary install directory 5 | file: path={{ dcos_path_tmp }} state=directory mode=0755 6 | 7 | - name: download installation file from the bootstrap 8 | get_url: url="http://{{ dcos_bootstrap_ip }}:{{ dcos_port_webserver }}/upgrade/latest/dcos_node_upgrade.sh" dest={{ dcos_path_tmp }}/dcos_node_upgrade.sh mode=0440 force=true 9 | 10 | - import_tasks: master.yml 11 | when: "'masters' in group_names" 12 | 13 | - import_tasks: agent.yml 14 | when: "'agents' in group_names" 15 | 16 | - import_tasks: agent.yml 17 | when: "'agent_publics' in group_names" 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # common 2 | .DS_Store 3 | .master_ip 4 | .master_lb_ip 5 | 6 | # cli 7 | dcos 8 | kubectl 9 | kubectl-version 10 | 11 | # ansible 12 | .vault 13 | **/vault 14 | hosts.yaml 15 | group_vars/all 16 | *.retry 17 | /tmp 18 | 19 | # vagrant 20 | 
.vagrant 21 | 22 | # terraform 23 | .deploy 24 | .terraform 25 | /*.tf 26 | /*.tf.disabled 27 | *.tfstate 28 | *.tfstate.backup 29 | terraform.tfvars 30 | .terraform.tfstate.lock.info 31 | modules/ 32 | desired_cluster_profile.tfvars.example 33 | desired_cluster_profile 34 | 35 | # terraform gce 36 | /account.json 37 | 38 | # ide 39 | .idea 40 | 41 | # keypair 42 | private-key.pem 43 | public-key.pem 44 | 45 | # python 46 | *.pyc 47 | -------------------------------------------------------------------------------- /plays/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook performs a DC/OS upgrade 3 | 4 | - name: setup the system requirements on all nodes 5 | hosts: all 6 | serial: 20 7 | become: true 8 | roles: 9 | - common 10 | - docker 11 | 12 | - name: generate upgrade configuration 13 | hosts: bootstraps 14 | serial: 1 15 | become: true 16 | vars: 17 | dcos_upgrade: true 18 | roles: 19 | - bootstrap 20 | 21 | - name: upgrade masters 22 | hosts: masters 23 | serial: 1 24 | become: true 25 | roles: 26 | - node-upgrade 27 | 28 | - name: upgrade agents 29 | hosts: [ agents, agent_publics] 30 | serial: 20 31 | become: true 32 | roles: 33 | - node-upgrade 34 | -------------------------------------------------------------------------------- /roles/node-upgrade/tasks/master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook upgrades the master nodes 3 | 4 | - name: upgrade node 5 | shell: "bash dcos_node_upgrade.sh" 6 | args: 7 | chdir: "{{ dcos_path_tmp }}" 8 | ignore_errors: yes 9 | 10 | - name: wait for mesos master 11 | wait_for: 12 | host: "{{ ansible_default_ipv4['address'] }}" 13 | port: 5050 14 | delay: 5 15 | 16 | - name: check if mesos master recovered 17 | uri: url=http://{{ ansible_default_ipv4['address'] }}:5050/metrics/snapshot return_content=true 18 | register: response 19 | until: "'registrar\\/log\\/recovered\":1.0' in 
response.content" 20 | retries: 12 21 | delay: 5 22 | changed_when: false 23 | when: dcos_ee_security != "strict" 24 | -------------------------------------------------------------------------------- /resources/main-k8s-api.tf.azure: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Allow access to Kubernetes API from the outside world 4 | # Public Subnet NSG Rule 5 | resource "azurerm_network_security_rule" "public-subnet-kubeapiRule" { 6 | name = "HTTPSKUBEAPI" 7 | priority = 125 8 | direction = "Inbound" 9 | access = "Allow" 10 | protocol = "Tcp" 11 | source_port_range = "6443" 12 | destination_port_range = "6443" 13 | source_address_prefix = "*" 14 | destination_address_prefix = "*" 15 | resource_group_name = "${azurerm_resource_group.dcos.name}" 16 | network_security_group_name = "${azurerm_network_security_group.public_subnet_security_group.name}" 17 | } 18 | -------------------------------------------------------------------------------- /roles/node-upgrade/tasks/agent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook upgrades the agent nodes 3 | 4 | - name: delete file libltdl.so.7 to prevent conflicts 5 | file: path=/opt/mesosphere/lib/libltdl.so.7 state=absent 6 | 7 | - name: upgrade node 8 | shell: "bash dcos_node_upgrade.sh" 9 | args: 10 | chdir: "{{ dcos_path_tmp }}" 11 | ignore_errors: yes 12 | 13 | - name: wait for mesos agent 14 | wait_for: 15 | host: "{{ ansible_default_ipv4['address'] }}" 16 | port: 5051 17 | delay: 5 18 | 19 | - name: check if mesos agent reregistered 20 | uri: url=http://{{ ansible_default_ipv4['address'] }}:5051/metrics/snapshot return_content=true 21 | register: response 22 | until: "'slave\\/registered\":1.0' in response.content" 23 | retries: 12 24 | delay: 5 25 | changed_when: false 26 | when: dcos_ee_security != "strict" 27 | -------------------------------------------------------------------------------- 
/docs/ACCESS_ONPREM.md: -------------------------------------------------------------------------------- 1 | ## Steps for enable SSH access on nodes with Ansible 2 | 3 | Require an [installed ansible package](http://docs.ansible.com/ansible/latest/intro_installation.html) and generated ssh key pair on the ansible control machine: 4 | 5 | ``` 6 | ssh-keygen -t rsa -b 4096 -C "admin@example.com" -f ~/.ssh/ansible-dcos 7 | ssh-add ~/.ssh/ansible-dcos 8 | ``` 9 | 10 | Add the following lines to your `group_vars/all` and be sure all `bootstraps`, `masters`, `agents` and `agent_publics` nodes use the same initial user and password: 11 | 12 | ``` 13 | # For initial SSH access on nodes with Ansible 14 | ansible_password: "YOUR_PASSWORD" 15 | ansible_become_pass: "YOUR_PASSWORD" 16 | #initial_remote_user: root 17 | ``` 18 | 19 | Finally, you can enable access via ssh to all nodes for use with ansible by applying the Ansible playbook: 20 | 21 | ``` 22 | ansible-playbook plays/access-onprem.yml 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/UPGRADE_DCOS.md: -------------------------------------------------------------------------------- 1 | # Steps for DC/OS upgrade On-Premises and on Cloud Providers 2 | 3 | In order to upgrade a cluster, you have to set the download URL for the target version of DC/OS inside of the file `group_vars/all/vars`. So for example if you want to upgrade to DC/OS 1.11.1, specify the version within the variable `dcos_version`. 4 | 5 | ```shell 6 | $ dcos_version: '1.11.1' 7 | ``` 8 | 9 | You also need to specify the DC/OS version that is currently running on the cluster within the variable `dcos_upgrade_from_version`: 10 | 11 | ```shell 12 | $ dcos_upgrade_from_version: '1.11.0' 13 | ``` 14 | 15 | ## On-Premises upgrade 16 | 17 | To start the upgrade trigger the play `plays/upgrade.yml`. 
The command for that is: 18 | 19 | ```shell 20 | $ ansible-playbook plays/upgrade.yml 21 | ``` 22 | 23 | ## Cloud Providers upgrade 24 | 25 | To start the upgrade trigger the play `plays/upgrade.yml`. The command for that is: 26 | 27 | ```shell 28 | $ ansible-playbook -i inventory.py plays/upgrade.yml 29 | ``` 30 | -------------------------------------------------------------------------------- /resources/desired_cluster_profile.gcp: -------------------------------------------------------------------------------- 1 | os = "centos_7.3" 2 | state = "none" 3 | # 4 | num_of_masters = "1" 5 | num_of_private_agents = "3" 6 | num_of_public_agents = "1" 7 | # 8 | gcp_project = "YOUR_GCP_PROJECT" 9 | gcp_region = "us-central1" 10 | gcp_ssh_pub_key_file = "/PATH/YOUR_GCP_SSH_PUBLIC_KEY.pub" 11 | # 12 | # If you want to use GCP service account key instead of GCP SDK 13 | # uncomment the line below and update it with the path to the key file 14 | #gcp_credentials_key_file = "/PATH/YOUR_GCP_SERVICE_ACCOUNT_KEY.json" 15 | # 16 | gcp_bootstrap_instance_type = "n1-standard-1" 17 | gcp_master_instance_type = "n1-standard-8" 18 | gcp_agent_instance_type = "n1-standard-8" 19 | gcp_public_agent_instance_type = "n1-standard-8" 20 | # 21 | # Change public/private subnetworks e.g. "10.65." 
if you want to run multiple clusters in the same project 22 | gcp_compute_subnetwork_public = "10.64.0.0/22" 23 | gcp_compute_subnetwork_private = "10.64.4.0/22" 24 | # 25 | # Inbound Master Access 26 | admin_cidr = "0.0.0.0/0" 27 | -------------------------------------------------------------------------------- /roles/bootstrap/tasks/init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: make sure docker-python is not installed via yum 3 | yum: 4 | name: docker-python 5 | state: absent 6 | 7 | - name: install epel 8 | yum: 9 | name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 10 | state: present 11 | 12 | - name: import epel gpg key 13 | rpm_key: 14 | key: /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 15 | state: present 16 | 17 | - name: make sure pip is installed 18 | yum: 19 | name: python-pip 20 | state: present 21 | 22 | - name: make sure docker-py is NOT installed 23 | pip: name="docker-py" state=absent 24 | 25 | - name: make sure docker is installed via pip 26 | pip: name="docker>=3.4" state=present 27 | 28 | - name: clear install directory 29 | file: path={{ dcos_path_bootstrap }} state=absent 30 | 31 | - name: create install directory 32 | file: path={{ dcos_path_bootstrap }} state=directory mode=0755 33 | 34 | - name: create install directory/genconf 35 | file: path={{ dcos_path_bootstrap }}/genconf/ state=directory mode=0755 36 | -------------------------------------------------------------------------------- /docs/DCOS_AGENTS.md: -------------------------------------------------------------------------------- 1 | # Steps to add/remove DC/OS agents On-Premises and on Cloud Providers 2 | 3 | ## On-Premises 4 | 5 | Edit `./hosts.yaml` and fill in the public IP addresses of your cluster agents so that Ansible can reach them: 6 | 7 | ``` 8 | ... 
9 | agents: 10 | hosts: 11 | # Public IP Addresses for the Agent Nodes 12 | 1.0.0.3: 13 | 1.0.0.4: 14 | agent_publics: 15 | hosts: 16 | # Public IP Addresses for the Public Agent Nodes 17 | 1.0.0.5: 18 | ... 19 | ``` 20 | 21 | To check that all instances are reachable via Ansible, run the following: 22 | 23 | ```shell 24 | $ ansible all -m ping 25 | ``` 26 | 27 | Finally, apply the Ansible playbook: 28 | 29 | ```shell 30 | $ ansible-playbook plays/install.yml 31 | ``` 32 | 33 | ## Cloud Providers 34 | 35 | Edit file `.deploy/desired_cluster_profile` with required agents count: 36 | 37 | ``` 38 | num_of_private_agents = "3" 39 | num_of_public_agents = "1" 40 | ``` 41 | 42 | Then you can apply the profile with: 43 | 44 | ```shell 45 | $ make launch-infra 46 | $ ansible-playbook -i inventory.py plays/install.yml 47 | ``` 48 | -------------------------------------------------------------------------------- /resources/override.aws.tf: -------------------------------------------------------------------------------- 1 | output "dns_resolvers" { 2 | value = "${var.dcos_resolvers}" 3 | } 4 | 5 | output "cluster_prefix" { 6 | value = "${data.template_file.cluster-name.rendered}" 7 | } 8 | 9 | output "bootstrap_public_ips" { 10 | value = "${aws_instance.bootstrap.public_ip}" 11 | } 12 | 13 | output "bootstrap_private_ips" { 14 | value = "${aws_instance.bootstrap.private_ip}" 15 | } 16 | 17 | output "lb_external_masters" { 18 | value = "${aws_elb.public-master-elb.dns_name}" 19 | } 20 | 21 | output "lb_internal_masters" { 22 | value = "${aws_elb.internal-master-elb.dns_name}" 23 | } 24 | 25 | output "master_public_ips" { 26 | value = ["${aws_instance.master.*.public_ip}"] 27 | } 28 | 29 | output "master_private_ips" { 30 | value = "${aws_instance.master.*.private_ip}" 31 | } 32 | 33 | output "agent_public_ips" { 34 | value = ["${aws_instance.agent.*.public_ip}"] 35 | } 36 | 37 | output "lb_external_agents" { 38 | value = "${aws_elb.public-agent-elb.dns_name}" 39 | } 40 | 41 | 
output "public_agent_public_ips" { 42 | value = ["${aws_instance.public-agent.*.public_ip}"] 43 | } 44 | 45 | output "dns_search" { 46 | value = "${var.aws_region}.compute.internal" 47 | } 48 | 49 | output "ip_detect" { 50 | value = "aws" 51 | } 52 | -------------------------------------------------------------------------------- /roles/node-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook installs all nodes 3 | 4 | - name: create temporary install directory 5 | file: path={{ dcos_path_tmp }} state=directory mode=0755 6 | 7 | - name: download installation file from the bootstrap 8 | get_url: url="http://{{ dcos_bootstrap_ip }}:{{ dcos_port_webserver }}/dcos_install.sh" dest={{ dcos_path_tmp }}/dcos_install.sh mode=0440 force=yes 9 | 10 | - name: check if already installed 11 | find: 12 | paths: /opt/mesosphere 13 | file_type: any 14 | patterns: "*" 15 | register: currently_installed 16 | 17 | - name: install master 18 | shell: bash dcos_install.sh master 19 | args: 20 | chdir: "{{ dcos_path_tmp }}" 21 | when: 22 | - currently_installed.matched|int == 0 23 | - "'masters' in group_names" 24 | 25 | - name: install agent 26 | shell: bash dcos_install.sh slave 27 | args: 28 | chdir: "{{ dcos_path_tmp }}" 29 | when: 30 | - currently_installed.matched|int == 0 31 | - "'agents' in group_names" 32 | 33 | - name: install public agent 34 | shell: bash dcos_install.sh slave_public 35 | args: 36 | chdir: "{{ dcos_path_tmp }}" 37 | when: 38 | - currently_installed.matched|int == 0 39 | - "'agent_publics' in group_names" 40 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook installs docker 3 | 4 | - name: enable overlay module 5 | lineinfile: 6 | dest: /etc/modules-load.d/overlay.conf 7 | state: present 8 | create: yes 9 | line: 
'overlay' 10 | 11 | - name: load overlay module 12 | modprobe: 13 | name: overlay 14 | state: present 15 | 16 | - name: enable docker yum repo 17 | shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 18 | changed_when: false 19 | 20 | - name: create docker systemd directory 21 | file: 22 | path: /etc/systemd/system/docker.service.d/ 23 | state: directory 24 | mode: 0755 25 | 26 | - name: configure docker to use overlay driver 27 | template: 28 | src: override.conf.j2 29 | dest: /etc/systemd/system/docker.service.d/override.conf 30 | mode: 0644 31 | force: yes 32 | notify: 33 | - restart docker 34 | 35 | - name: install docker packages 36 | yum: 37 | name: "docker-ce-{{ docker_version }}" 38 | update_cache: yes 39 | state: present 40 | 41 | - name: enable docker 42 | service: 43 | name: docker 44 | enabled: yes 45 | state: started 46 | 47 | - name: allow the default user to use docker 48 | user: 49 | name: "{{ ansible_ssh_user }}" 50 | groups: docker 51 | append: yes 52 | -------------------------------------------------------------------------------- /roles/bootstrap/tasks/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook generates the DC/OS upgrade scripts 3 | - name: 'check: required attributes present for update?' 4 | fail: 5 | msg: | 6 | If we don't have all the informations (dcos_upgrade_from_version), 7 | we cannot update the existing DC/OS cluster. 
8 | when: > 9 | dcos_upgrade_from_version == '' 10 | 11 | - name: Generate DC/OS upgrade files 12 | shell: "bash dcos_generate_config.sh --generate-node-upgrade-script {{ dcos_upgrade_from_version }}" 13 | args: 14 | chdir: "{{ dcos_path_bootstrap }}" 15 | 16 | - name: Get upgrade directory hash 17 | shell: "ls -td -- */ | head -n 1 | cut -d'/' -f1" 18 | args: 19 | chdir: "{{ dcos_path_bootstrap }}/genconf/serve/upgrade" 20 | register: upgrade_hash 21 | 22 | - name: check upgrade script exists 23 | stat: 24 | path: "{{ dcos_path_bootstrap }}/genconf/serve/upgrade/{{ upgrade_hash.stdout }}/dcos_node_upgrade.sh" 25 | register: sh_stat 26 | 27 | - name: create directory latest 28 | file: path={{ dcos_path_bootstrap }}/genconf/serve/upgrade/latest state=directory mode=0755 29 | 30 | - name: move upgrade script to folder latest 31 | shell: "cp {{ upgrade_hash.stdout }}/dcos_node_upgrade.sh latest/" 32 | args: 33 | chdir: "{{ dcos_path_bootstrap }}/genconf/serve/upgrade" 34 | when: sh_stat.stat.exists 35 | -------------------------------------------------------------------------------- /resources/override.azure.tf: -------------------------------------------------------------------------------- 1 | output "dns_resolvers" { 2 | value = "${var.dcos_resolvers}" 3 | } 4 | 5 | output "cluster_prefix" { 6 | value = "${data.template_file.cluster-name.rendered}" 7 | } 8 | 9 | output "bootstrap_public_ips" { 10 | value = "${azurerm_public_ip.bootstrap_public_ip.fqdn}" 11 | } 12 | 13 | output "bootstrap_private_ips" { 14 | value = "${azurerm_network_interface.bootstrap_nic.private_ip_address}" 15 | } 16 | 17 | output "lb_external_masters" { 18 | value = "${azurerm_public_ip.master_load_balancer_public_ip.fqdn}" 19 | } 20 | 21 | output "lb_internal_masters" { 22 | value = "${azurerm_lb.master_internal_load_balancer.private_ip_address}" 23 | } 24 | 25 | output "master_public_ips" { 26 | value = ["${azurerm_public_ip.master_public_ip.*.fqdn}"] 27 | } 28 | 29 | output 
"master_private_ips" { 30 | value = "${azurerm_network_interface.master_nic.*.private_ip_address}" 31 | } 32 | 33 | output "agent_public_ips" { 34 | value = ["${azurerm_public_ip.agent_public_ip.*.fqdn}"] 35 | } 36 | 37 | output "lb_external_agents" { 38 | value = "${azurerm_public_ip.public_agent_load_balancer_public_ip.fqdn}" 39 | } 40 | 41 | output "public_agent_public_ips" { 42 | value = ["${azurerm_public_ip.public_agent_public_ip.*.fqdn}"] 43 | } 44 | 45 | output "dns_search" { 46 | value = "None" 47 | } 48 | 49 | output "ip_detect" { 50 | value = "azure" 51 | } 52 | -------------------------------------------------------------------------------- /roles/package/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # this playbook manages DC/OS CLI 3 | 4 | - name: Check if DC/OS CLI is already installed 5 | become: false 6 | find: 7 | paths: '..' 8 | file_type: file 9 | patterns: 'dcos' 10 | register: dcos_cli_installed 11 | when: dcos_cli_enabled 12 | 13 | - name: Setup DC/OS CLI 14 | when: (dcos_cli_enabled and dcos_cli_installed.matched|int == 0) or dcos_cli_upgrade 15 | block: 16 | - name: Download the DC/OS CLI Linux binary (dcos) to your current directory and make it executable 17 | become: false 18 | get_url: url="https://downloads.dcos.io/binaries/cli/linux/x86-64/dcos-{{ dcos_version | regex_replace('^([0-9]{1,2}\\.[0-9]{1,2})(.*)$', '\\1') }}/dcos" dest="../dcos" mode=0755 force=yes 19 | when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" 20 | 21 | - name: Download the DC/OS CLI Darwin binary (dcos) to your current directory and make it executable 22 | become: false 23 | get_url: url="https://downloads.dcos.io/binaries/cli/darwin/x86-64/dcos-{{ dcos_version | regex_replace('^([0-9]{1,2}\\.[0-9]{1,2})(.*)$', '\\1') }}/dcos" dest="../dcos" mode=0755 force=yes 24 | when: ansible_os_family == "Darwin" 25 | 26 | - name: Uninstall DC/OS CLI 27 | become: false 28 | file: 29 | path: 
'../dcos' 30 | state: absent 31 | when: not dcos_cli_enabled 32 | 33 | - name: Connect to DC/OS cluster 34 | dcos_connection: 35 | url: "https://{{ groups['masters'][0] }}" 36 | insecure: true 37 | -------------------------------------------------------------------------------- /hosts.example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example for an ansible inventory file 3 | all: 4 | children: 5 | bootstraps: 6 | hosts: 7 | # Public IP Address of the Bootstrap Node 8 | 1.0.0.1: 9 | masters: 10 | hosts: 11 | # Public IP Addresses for the Master Nodes 12 | 1.0.0.2: 13 | agents: 14 | hosts: 15 | # Public IP Addresses for the Agent Nodes 16 | 1.0.0.3: 17 | 1.0.0.4: 18 | agent_publics: 19 | hosts: 20 | # Public IP Addresses for the Public Agent Nodes 21 | 1.0.0.5: 22 | vars: 23 | # IaaS target for DC/OS deployment 24 | # options: aws, gcp, azure or onprem 25 | dcos_iaas_target: 'onprem' 26 | 27 | # Choose the IP Detect Script 28 | # options: eth0, eth1, ... 
(or other device name for existing network interface) 29 | dcos_ip_detect_interface: 'eth0' 30 | 31 | # (internal/private) IP Address of the Bootstrap Node 32 | dcos_bootstrap_ip: '2.0.0.1' 33 | 34 | # (internal/private) IP Addresses for the Master Nodes 35 | dcos_master_list: 36 | - 2.0.0.2 37 | 38 | # DNS Resolvers 39 | dcos_resolvers: 40 | - 8.8.4.4 41 | - 8.8.8.8 42 | 43 | # DNS Search Domain 44 | dcos_dns_search: 'None' 45 | 46 | # Internal Loadbalancer DNS for Masters (only needed for exhibitor: aws_s3) 47 | dcos_exhibitor_address: 'masterlb.internal' 48 | 49 | # External Loadbalancer DNS for Masters or 50 | # (external/public) Master Node IP Address (only needed for cli setup) 51 | dcos_master_address: 'masterlb.external' -------------------------------------------------------------------------------- /roles/bootstrap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: pre-check.yml 3 | 4 | - import_tasks: init.yml 5 | 6 | - name: Download DC/OS installation file 7 | get_url: url="{{ dcos_download }}" dest={{ dcos_path_bootstrap }}/dcos_generate_config.sh mode=0440 force=yes 8 | 9 | - name: Generate IP detection script. 10 | template: src=ip-detect-{{ dcos_iaas_target }}.j2 dest={{ dcos_path_bootstrap }}/genconf/ip-detect mode=0644 11 | 12 | - name: Set Public IP detection script. 
13 | template: src=ip-detect-public-{{ dcos_iaas_target }}.j2 dest={{ dcos_path_bootstrap }}/genconf/ip-detect-public mode=0644 14 | when: dcos_iaas_target != "onprem" 15 | 16 | - name: Generate DC/OS configuration 17 | template: src=config.yaml.j2 dest={{ dcos_path_bootstrap }}/genconf/config.yaml mode=0644 18 | 19 | - name: Set Fault Domain detection script 20 | copy: src=fault-domain-detect dest={{ dcos_path_bootstrap }}/genconf/fault-domain-detect mode=0644 21 | when: dcos_iaas_target != "onprem" 22 | 23 | - name: Generate DC/OS bootstrap files 24 | shell: "bash dcos_generate_config.sh" 25 | args: 26 | chdir: "{{ dcos_path_bootstrap }}" 27 | when: dcos_upgrade == False 28 | 29 | - import_tasks: upgrade.yml 30 | when: dcos_upgrade == True 31 | 32 | - name: Start web server to serve the bootstrap files 33 | docker_container: 34 | name: dcos_nginx 35 | image: nginx 36 | state: started 37 | recreate: yes 38 | restart: yes 39 | ports: 40 | - "{{ dcos_port_webserver }}:80" 41 | volumes: 42 | - "{{ dcos_path_bootstrap }}/genconf/serve:/usr/share/nginx/html:ro" 43 | -------------------------------------------------------------------------------- /resources/override.gcp.tf: -------------------------------------------------------------------------------- 1 | output "dns_resolvers" { 2 | value = "${var.dcos_resolvers}" 3 | } 4 | 5 | output "cluster_prefix" { 6 | value = "${data.template_file.cluster-name.rendered}" 7 | } 8 | 9 | output "bootstrap_public_ips" { 10 | value = "${google_compute_instance.bootstrap.network_interface.0.access_config.0.assigned_nat_ip}" 11 | 12 | } 13 | 14 | output "bootstrap_private_ips" { 15 | value = "${google_compute_instance.bootstrap.network_interface.0.address}" 16 | } 17 | 18 | output "lb_external_masters" { 19 | value = "${google_compute_forwarding_rule.external-master-forwarding-rule-http.ip_address}" 20 | } 21 | 22 | output "lb_internal_masters" { 23 | value = 
"${google_compute_forwarding_rule.internal-master-forwarding-rule.ip_address}" 24 | } 25 | 26 | output "master_public_ips" { 27 | value = ["${google_compute_instance.master.*.network_interface.0.access_config.0.assigned_nat_ip}"] 28 | } 29 | 30 | output "master_private_ips" { 31 | value = "${google_compute_instance.master.*.network_interface.0.address}" 32 | } 33 | 34 | output "agent_public_ips" { 35 | value = ["${google_compute_instance.agent.*.network_interface.0.access_config.0.assigned_nat_ip}"] 36 | } 37 | 38 | output "lb_external_agents" { 39 | value = "${google_compute_forwarding_rule.external-public-agent-forwarding-rule-http.ip_address}" 40 | } 41 | 42 | output "public_agent_public_ips" { 43 | value = ["${google_compute_instance.public-agent.*.network_interface.0.access_config.0.assigned_nat_ip}"] 44 | } 45 | 46 | output "dns_search" { 47 | value = "None" 48 | } 49 | 50 | output "ip_detect" { 51 | value = "gcp" 52 | } 53 | -------------------------------------------------------------------------------- /group_vars/all.example: -------------------------------------------------------------------------------- 1 | --- 2 | # Install latest operating system updates 3 | os_system_updates: False 4 | 5 | # DC/OS cluster version 6 | dcos_version: '1.11.4' 7 | 8 | # If planning to upgrade a previous deployed DC/OS Cluster, 9 | # uncomment the following variable 10 | #dcos_upgrade_from_version: '1.11.3' 11 | 12 | # Download URL for DC/OS 13 | dcos_download: "https://downloads.dcos.io/dcos/stable/{{ dcos_version }}/dcos_generate_config.sh" 14 | 15 | # Name of the DC/OS Cluster 16 | dcos_cluster_name: 'demo' 17 | 18 | # Deploy Mesosphere Enterprise DC/OS or DC/OS OSS? 
19 | dcos_deploy_ee_package: False 20 | 21 | # Optional if dcos_iaas_target := aws 22 | #dcos_exhibitor: 'aws_s3' 23 | #dcos_exhibitor_explicit_keys: true 24 | #dcos_aws_access_key_id: '******' 25 | #dcos_aws_secret_access_key: '******' 26 | #dcos_aws_region: 'us-west-2' 27 | #dcos_s3_bucket: 'bucket-name' 28 | 29 | # Optional if dcos_iaas_target := azure 30 | #dcos_exhibitor: 'azure' 31 | #dcos_exhibitor_azure_account_name: 'name' 32 | #dcos_exhibitor_azure_account_key: '******' 33 | 34 | # Only required when deploying Mesosphere Enterprise DC/OS 35 | dcos_ee_security: 'permissive' 36 | dcos_ee_license_key_contents: '******' 37 | dcos_ee_superuser_username: admin 38 | # Default password:= admin 39 | dcos_ee_superuser_password_hash: "$6$rounds=656000$8CXbMqwuglDt3Yai$ZkLEj8zS.GmPGWt.dhwAv0.XsjYXwVHuS9aHh3DMcfGaz45OpGxC5oQPXUUpFLMkqlXCfhXMloIzE0Xh8VwHJ." 40 | 41 | # Configure rexray to enable support of external volumes (only for Mesosphere Enterprise DC/OS) 42 | # Note: Set rexray_config_method: file and edit ./roles/bootstrap/templates/rexray.yaml.j2 for a custom rexray configuration 43 | # options: empty, file 44 | dcos_ee_rexray_config_method: empty 45 | -------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update all packages 3 | yum: 4 | name: "*" 5 | state: latest 6 | exclude: docker-ce,docker-engine,docker-engine-selinux 7 | when: os_system_updates 8 | 9 | - name: install system utilities 10 | yum: 11 | name: "{{ item }}" 12 | state: latest 13 | with_items: 14 | - tar 15 | - xz 16 | - unzip 17 | - curl 18 | - ipset 19 | - chrony 20 | - firewalld 21 | - dnsmasq 22 | - yum-utils 23 | - net-tools 24 | - bind-utils 25 | - bash 26 | - coreutils 27 | - gawk 28 | - gettext 29 | - grep 30 | - iproute 31 | - util-linux 32 | - sed 33 | 34 | - name: disable selinux 35 | selinux: 36 | policy: targeted 37 | state: 
permissive 38 | 39 | - name: create group 'nogroup' 40 | group: 41 | name: nogroup 42 | state: present 43 | 44 | - name: ensure ipv6 is not disabled 45 | sysctl: 46 | name: "{{ item }}" 47 | value: "0" 48 | state: present 49 | with_items: 50 | - net.ipv6.conf.all.disable_ipv6 51 | - net.ipv6.conf.default.disable_ipv6 52 | - net.ipv6.conf.lo.disable_ipv6 53 | 54 | - name: enable chronyd 55 | service: 56 | name: chronyd 57 | enabled: yes 58 | state: started 59 | 60 | - name: disable firewalld 61 | service: 62 | name: firewalld 63 | enabled: no 64 | state: stopped 65 | 66 | - name: disable dnsmasq 67 | service: 68 | name: dnsmasq 69 | enabled: no 70 | state: stopped 71 | 72 | - name: check if time is synchronized 73 | shell: "timedatectl status | grep 'NTP synchronized: yes'" 74 | register: timedatectl_result 75 | until: timedatectl_result.rc == 0 76 | changed_when: timedatectl_result.rc != 0 77 | retries: 5 78 | delay: 10 -------------------------------------------------------------------------------- /plays/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook removes DC/OS 3 | 4 | - hosts: [ masters, agents, agent_publics] 5 | become: true 6 | tasks: 7 | 8 | - name: DC/OS uninstall 9 | shell: /opt/mesosphere/bin/dcos-path/dcos-shell /opt/mesosphere/bin/pkgpanda uninstall 10 | ignore_errors: true 11 | 12 | - name: remove DC/OS configuration 13 | file: 14 | path: "{{ item }}" 15 | state: absent 16 | with_items: 17 | - /opt/mesosphere 18 | - /etc/mesosphere 19 | - /var/lib/mesosphere 20 | - /var/lib/dcos 21 | - /var/lib/zookeeper 22 | - /var/lib/mesos 23 | - /tmp/dcos/ 24 | - /run/dcos/ 25 | - /run/mesos 26 | - /var/log/mesos 27 | - /etc/profile.d/dcos.sh 28 | - /etc/systemd/journald.conf.d/dcos.conf 29 | - /etc/systemd/system/dcos-download.service 30 | - /etc/systemd/system/dcos-link-env.service 31 | - /etc/systemd/system/dcos-setup.service 32 | - 
/etc/systemd/system/multi-user.target.wants/dcos-setup.service 33 | - /etc/systemd/system/multi-user.target.wants/dcos.target 34 | - /etc/systemd/system/dcos.target 35 | - /etc/systemd/system/dcos.target.wants 36 | ignore_errors: true 37 | 38 | - name: reload systemd 39 | shell: systemctl daemon-reload 40 | ignore_errors: true 41 | 42 | - name: reset nameserver 43 | shell: "echo 'nameserver 8.8.8.8' > /etc/resolv.conf" 44 | 45 | - hosts: bootstraps 46 | become: true 47 | tasks: 48 | 49 | - name: Stop Nginx 50 | docker_container: 51 | name: dcos_nginx 52 | state: absent 53 | 54 | - name: Clear install directory 55 | file: path=/var/lib/dcos-bootstrap state=absent 56 | ignore_errors: true 57 | -------------------------------------------------------------------------------- /roles/bootstrap/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dcos_download: '' 3 | 4 | # Name of the DC/OS Cluster 5 | dcos_cluster_name: '' 6 | 7 | # If planning to upgrade a previous deployed DC/OS Cluster 8 | dcos_upgrade: False 9 | dcos_upgrade_from_version: '' 10 | 11 | dcos_path_bootstrap: '/var/lib/dcos-bootstrap' 12 | dcos_path_tmp: '/tmp/dcos' 13 | dcos_port_webserver: 8080 14 | 15 | # Deploy Mesosphere Enterprise DC/OS or DC/OS OSS? 
16 | dcos_deploy_ee_package: False 17 | 18 | dcos_ip_detect_interface: '' 19 | 20 | # IP Address of the Bootstrap Node (internal/private) 21 | dcos_bootstrap_ip: '' 22 | 23 | # (internal/private) IP Addresses for the Master Nodes 24 | dcos_master_list: [] 25 | 26 | # External Loadbalancer DNS for Masters or 27 | # (external/public) Master Node IP Address (only needed for cli setup) 28 | dcos_master_address: 'masterlb.external' 29 | 30 | dcos_resolvers: 31 | - 8.8.4.4 32 | - 8.8.8.8 33 | 34 | dcos_dns_search: None 35 | 36 | # IaaS target for DC/OS deployment 37 | # options: aws, gcp, azure or onprem 38 | dcos_iaas_target: '' 39 | 40 | # Configuration for the Exhibitor Storage Backend 41 | # options: aws_s3, static, azure 42 | dcos_exhibitor: 'static' 43 | 44 | dcos_exhibitor_address: '' 45 | 46 | # AWS S3 Credentials (only needed for exhibitor: aws_s3) 47 | dcos_exhibitor_explicit_keys: false 48 | dcos_aws_access_key_id: '' 49 | dcos_aws_secret_access_key: '' 50 | dcos_aws_region: '' 51 | dcos_s3_bucket: '' 52 | dcos_s3_prefix: '' 53 | 54 | # Azure Credentials (only needed for exhibitor: azure) 55 | dcos_exhibitor_azure_account_name: '' 56 | dcos_exhibitor_azure_account_key: '' 57 | dcos_exhibitor_azure_prefix: '' 58 | 59 | # Only required when deploying Mesosphere Enterprise DC/OS 60 | dcos_ee_security: 'permissive' 61 | dcos_ee_license_key_contents: '' 62 | dcos_ee_superuser_username: '' 63 | dcos_ee_superuser_password_hash: '' 64 | 65 | # Configure rexray to enable support of external volumes 66 | # Note: Set rexray_config_method: file and edit ./roles/bootstrap/templates/rexray.yaml.j2 for a custom rexray configuration 67 | # options: empty, file 68 | dcos_ee_rexray_config_method: empty 69 | -------------------------------------------------------------------------------- /roles/bootstrap/tasks/pre-check.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check all required attributes are present?' 
3 | fail: 4 | msg: | 5 | If we don't have all the informations (dcos_download, dcos_cluster_name, dcos_iaas_target), 6 | we cannot install DC/OS cluster. 7 | when: > 8 | dcos_download == '' or 9 | dcos_iaas_target == '' or 10 | dcos_cluster_name == '' 11 | 12 | - name: Check conditional requirements are present for IaaS (AWS)?' 13 | fail: 14 | msg: | 15 | Please check that the following attributes (dcos_aws_secret_access_key, 16 | dcos_aws_region, dcos_s3_bucket, dcos_s3_prefix) are provided, 17 | so we can install DC/OS cluster. 18 | when: > 19 | dcos_exhibitor == 'aws_s3' and 20 | ( dcos_aws_region == '' or dcos_s3_bucket == '' or 21 | dcos_exhibitor_address == '' or dcos_s3_prefix == '' ) 22 | 23 | - name: Check conditional requirements are present for IaaS (Azure)?' 24 | fail: 25 | msg: | 26 | Please check that the following attributes (dcos_exhibitor_address, 27 | dcos_exhibitor_azure_account_name, dcos_exhibitor_azure_account_key, 28 | dcos_exhibitor_azure_prefix) are provided, 29 | so we can install DC/OS cluster. 30 | when: > 31 | dcos_exhibitor == 'azure' and 32 | ( dcos_exhibitor_address == '' or 33 | dcos_exhibitor_azure_account_name == '' or 34 | dcos_exhibitor_azure_account_key == '' or 35 | dcos_exhibitor_azure_prefix == '' ) 36 | 37 | - name: Check conditional requirements are present for OnPrem ?' 38 | fail: 39 | msg: | 40 | Please check that the following attributes (dcos_ip_detect_interface) are provided, 41 | so we can install DC/OS cluster. 42 | when: > 43 | dcos_iaas_target == 'onprem' and 44 | dcos_ip_detect_interface == '' 45 | 46 | - name: Check conditional requirements are present for DC/OS EE ?' 47 | fail: 48 | msg: | 49 | Please check that the following attributes (dcos_ee_license_key_contents, 50 | dcos_ee_superuser_username, dcos_ee_superuser_password_hash) are provided, 51 | so we can install DC/OS cluster. 
52 | when: > 53 | dcos_deploy_ee_package == True and ( 54 | dcos_ee_license_key_contents == '' or 55 | dcos_ee_superuser_username == '' or 56 | dcos_ee_superuser_password_hash == '' ) 57 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/config.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | bootstrap_url: http://{{ dcos_bootstrap_ip }}:{{ dcos_port_webserver }} 3 | cluster_name: '{{ dcos_cluster_name }}' 4 | 5 | {% if dcos_exhibitor == "static" %} 6 | exhibitor_storage_backend: static 7 | master_discovery: static 8 | master_list: 9 | {% for master in dcos_master_list %} 10 | - {{ master }}{{ '\n' }} 11 | {%- endfor %} 12 | {% endif %} 13 | 14 | master_external_loadbalancer: {{ dcos_master_address }} 15 | 16 | {% if dcos_exhibitor == "aws_s3" %} 17 | exhibitor_storage_backend: aws_s3 18 | exhibitor_address: {{ dcos_exhibitor_address }} 19 | 20 | 21 | {% if dcos_exhibitor_explicit_keys == True %} 22 | exhibitor_explicit_keys: 'true' 23 | aws_access_key_id: {{ dcos_aws_access_key_id }} 24 | aws_secret_access_key: {{ dcos_aws_secret_access_key }} 25 | {% else %} 26 | exhibitor_explicit_keys: 'false' 27 | {% endif %} 28 | aws_region: {{ dcos_aws_region }} 29 | s3_bucket: {{ dcos_s3_bucket }} 30 | s3_prefix: {{ dcos_s3_prefix }} 31 | master_discovery: master_http_loadbalancer 32 | num_masters: {{ groups['masters'] | length }} 33 | {% endif %} 34 | 35 | {% if dcos_exhibitor == "azure" %} 36 | exhibitor_storage_backend: azure 37 | exhibitor_address: {{ dcos_exhibitor_address }} 38 | exhibitor_azure_account_name: {{ dcos_exhibitor_azure_account_name }} 39 | exhibitor_azure_account_key: {{ dcos_exhibitor_azure_account_key }} 40 | exhibitor_azure_prefix: {{ dcos_exhibitor_azure_prefix }} 41 | master_discovery: master_http_loadbalancer 42 | num_masters: {{ groups['masters'] | length }} 43 | {% endif %} 44 | 45 | resolvers: 46 | {% for resolver in dcos_resolvers %} 
47 | - {{ resolver }}{{ '\n' }} 48 | {%- endfor %} 49 | 50 | dns_search: {{ dcos_dns_search }} 51 | 52 | {% if dcos_iaas_target != "onprem" %} 53 | ip_detect_public_filename: /genconf/ip-detect-public 54 | {% endif %} 55 | 56 | # (only for Mesosphere Enterprise DC/OS) 57 | {% if dcos_deploy_ee_package == True %} 58 | superuser_username: {{ dcos_ee_superuser_username }} 59 | superuser_password_hash: {{ dcos_ee_superuser_password_hash }} 60 | license_key_contents: {{ dcos_ee_license_key_contents }} 61 | security: {{ dcos_ee_security }} 62 | {% if dcos_iaas_target != "onprem" %} 63 | fault_domain_enabled: true 64 | {% else %} 65 | fault_domain_enabled: false 66 | {% endif %} 67 | {% endif %} 68 | 69 | {% if dcos_ee_rexray_config_method == 'file' %} 70 | {% include "rexray.yaml.j2" %} 71 | {% endif %} 72 | -------------------------------------------------------------------------------- /roles/package/kubernetes/tasks/kubectl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook connects from the kubectl CLI on your ansible control machine to your kubernetes cluster 3 | 4 | - name: check if k8s CLI is already installed 5 | become: false 6 | find: 7 | paths: '..' 
8 | file_type: file 9 | patterns: 'kubectl' 10 | register: cli_k8s_installed 11 | when: dcos_k8s_enabled 12 | 13 | - name: install kubectl 14 | when: (cli_k8s_installed.matched|int == 0 or dcos_cli_upgrade) and dcos_k8s_enabled 15 | block: 16 | - name: create temporay file 17 | become: false 18 | get_url: url="https://storage.googleapis.com/kubernetes-release/release/stable.txt" dest="../kubectl-version" mode=0744 force=yes 19 | 20 | - name: detect kubernetes version 21 | shell: cat "../kubectl-version" 22 | register: kubernetes_version 23 | 24 | - name: download the k8s CLI Linux binary (kubectl) to your current directory and make it executable 25 | become: false 26 | get_url: url="https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version.stdout }}/bin/linux/amd64/kubectl" dest="../kubectl" mode=0755 force=yes 27 | when: ansible_os_family == "Debian" or ansible_os_family == "RedHat" 28 | 29 | - name: download the k8s CLI Darwin binary (kubectl) to your current directory and make it executable 30 | become: false 31 | get_url: url="https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version.stdout }}/bin/darwin/amd64/kubectl" dest="../kubectl" mode=0755 force=yes 32 | when: ansible_os_family == "Darwin" 33 | 34 | - name: clean-up temporay file 35 | become: false 36 | file: 37 | path: '../kubectl-version' 38 | state: absent 39 | 40 | - name: configure kubernetes on DC/OS for common kubectl CLI commands 41 | command: "dcos package install kubernetes --package-version {{ dcos_k8s_package_version }} --yes --cli" 42 | 43 | - name: wait for kubernetes to come up 44 | uri: 45 | url: "https://{{ groups['agent_publics'][0] }}:6443" 46 | validate_certs: no 47 | retries: 60 48 | delay: 10 49 | register: result 50 | until: ('status' in result) and ((result.status == 403) or (result.status == 401)) 51 | changed_when: false 52 | failed_when: false 53 | 54 | - name: configure kubernetes on DC/OS for common kubectl CLI commands 55 | 
command: "dcos kubernetes kubeconfig --apiserver-url https://{{ groups['agent_publics'][0] }}:6443 --insecure-skip-tls-verify" 56 | -------------------------------------------------------------------------------- /roles/package/kubernetes/tasks/kubernetes-proxy.yml: -------------------------------------------------------------------------------- 1 | # This playbook installs the Kubernetes Proxy to etasblish a connection for kubectl 2 | 3 | - name: Create a group for Marathon-LB 4 | dcos_iam_group: 5 | gid: marathon-lb-group 6 | description: Permissions for Marathon-LB 7 | state: present 8 | permissions: 9 | - rid: dcos:service:marathon:marathon:services:/ 10 | action: read 11 | - rid: dcos:service:marathon:marathon:admin:events 12 | action: read 13 | when: dcos_deploy_ee_package 14 | 15 | - name: Create service account for Marathon-LB 16 | dcos_iam_serviceaccount: 17 | sid: marathon-lb 18 | description: 'Marathon-lb Service Account' 19 | secret_path: marathon-lb/secret 20 | groups: 21 | - marathon-lb-group 22 | state: present 23 | when: dcos_deploy_ee_package 24 | 25 | - name: Ensure Marathon-LB is installed 26 | dcos_package: 27 | name: marathon-lb 28 | app_id: marathon-lb 29 | version: 1.12.2 30 | state: present 31 | options: 32 | marathon-lb: 33 | secret_name: "marathon-lb/secret" 34 | marathon-uri: "https://marathon.mesos:8443" 35 | bind-http-https: false 36 | instances: 1 37 | when: dcos_deploy_ee_package 38 | 39 | - name: Ensure Marathon-LB is installed 40 | dcos_package: 41 | name: marathon-lb 42 | app_id: marathon-lb 43 | version: 1.12.2 44 | state: present 45 | options: 46 | marathon-lb: 47 | instances: 1 48 | when: not dcos_deploy_ee_package 49 | 50 | - name: Deploy Kubectl Proxy via Marathon 51 | dcos_marathon: 52 | app_id: "{{ dcos_k8s_proxy_app_id }}" 53 | state: present 54 | options: 55 | { 56 | "instances": 1, 57 | "cpus": 0.001, 58 | "mem": 16, 59 | "cmd": "tail -F /dev/null", 60 | "container": { 61 | "type": "MESOS" 62 | }, 63 | "portDefinitions": 
[ 64 | { 65 | "protocol": "tcp", 66 | "port": 0 67 | } 68 | ], 69 | "labels": { 70 | "HAPROXY_GROUP": "external", 71 | "HAPROXY_0_MODE": "http", 72 | "HAPROXY_0_PORT": "6443", 73 | "HAPROXY_0_SSL_CERT": "/etc/ssl/cert.pem", 74 | "HAPROXY_0_BACKEND_SERVER_OPTIONS": " timeout connect 10s\n timeout client 86400s\n timeout server 86400s\n timeout tunnel 86400s\n server kube-apiserver apiserver.{{ dcos_k8s_app_id }}.l4lb.thisdcos.directory:6443 ssl verify none\n" 75 | } 76 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DEPRECATED - Deploy DC/OS using Ansible 2 | 3 | *Notice:* This repo was deprecated. Please consider using the Ansible Roles for DC/OS maintained by the Mesosphere SRE team: https://github.com/dcos/dcos-ansible 4 | 5 | ## Overview 6 | 7 | Infrastructure for Cloud Providers is bootstrapped with Terraform. 8 | 9 | Ansible playbook installs Open or Enterprise DC/OS to infrastucture On-Premises/Cloud Providers and is supposed to run on CentOS 7. 10 | The installation steps are based on the [Advanced Installation Guide][mesosphere-install] of DC/OS. 11 | 12 | ## Getting Started 13 | 14 | All development is done on the master branch. Tested versions are identified via git tags. 
To get started, you can clone or fork this repo: 15 | 16 | ```shell 17 | git clone https://github.com/dcos-labs/ansible-dcos 18 | ``` 19 | 20 | Use `git tag` to list all versions: 21 | 22 | ```shell 23 | git tag 24 | v0.7.0-dcos-1.11 25 | ``` 26 | 27 | Check out the latest version with: 28 | 29 | ```shell 30 | git checkout v0.7.0-dcos-1.11 31 | ``` 32 | 33 | ## Install 34 | 35 | Here are guides to follow to install the DC/OS cluster: 36 | 37 | * [On-Premises with Ansible](docs/INSTALL_ONPREM.md) 38 | * [On AWS with Terraform/Ansible](docs/INSTALL_AWS.md) 39 | * [On Azure with Terraform/Ansible](docs/INSTALL_AZURE.md) 40 | * [On GCP with Terraform/Ansible](docs/INSTALL_GCP.md) 41 | 42 | Here are guides to follow to install `framework as-a-service`: 43 | 44 | * [Kubernetes as-a-Service](docs/INSTALL_KUBERNETES.md) 45 | 46 | ## Operational tasks 47 | 48 | Upgrade the DC/OS cluster: 49 | 50 | * [Upgrade DC/OS](docs/UPGRADE_DCOS.md) 51 | 52 | Change number of DC/OS agents: 53 | 54 | * [Add/remove DC/OS agents](docs/DCOS_AGENTS.md) 55 | 56 | Upgrade the Kubernetes cluster: 57 | 58 | * [Upgrade Kubernetes as-a-Service](docs/INSTALL_KUBERNETES.md#upgrade-kubernetes-on-dcos-package) 59 | 60 | ## Documentation 61 | 62 | All documentation for this project is located in the [docs](docs/) directory at the root of this repository. 63 | 64 | ## Acknowledgements 65 | 66 | Current maintainers: 67 | 68 | * [Jan Repnak][github-jrx] 69 | * [Rimas Mocevicius][github-rimusz] 70 | 71 | ## Roadmap 72 | 73 | - [X] Support for On-Premises 74 | - [X] Support for AWS 75 | - [X] Support for Azure 76 | - [X] Support for GCP 77 | - [X] Support Kubernetes as-a-service 78 | 79 | ## License 80 | [DC/OS][github-dcos], along with this project, are both open source software released under the 81 | [Apache Software License, Version 2.0](LICENSE). 
82 | 83 | [mesosphere-install]: https://docs.mesosphere.com/latest/installing/ent/custom/advanced/ 84 | [github-dcos]: https://github.com/dcos/dcos 85 | [github-jrx]: https://github.com/jrx 86 | [github-rimusz]: https://github.com/rimusz 87 | -------------------------------------------------------------------------------- /docs/INSTALL_ONPREM.md: -------------------------------------------------------------------------------- 1 | # Steps for DC/OS installation with Ansible On-Premises 2 | 3 | With the following guide, you are able to install a DC/OS cluster on premises. You need the Ansible tool installed. 4 | On MacOS, you can use [brew](https://brew.sh/) for that. 5 | 6 | ```shell 7 | $ brew install ansible 8 | ``` 9 | 10 | ## Setup infrastructure 11 | 12 | Copy `./hosts.example.yaml` to `./hosts.yaml` and fill in the public IP addresses of your cluster so that Ansible can reach them and additionally set for the variables `dcos_bootstrap_ip` and `dcos_master_list` the private/internal IP addresses for cluster-internal communication. For example: 13 | 14 | ``` 15 | --- 16 | # Example for an ansible inventory file 17 | all: 18 | children: 19 | bootstraps: 20 | hosts: 21 | # Public IP Address of the Bootstrap Node 22 | 1.0.0.1: 23 | masters: 24 | hosts: 25 | # Public IP Addresses for the Master Nodes 26 | 1.0.0.2: 27 | agents: 28 | hosts: 29 | # Public IP Addresses for the Agent Nodes 30 | 1.0.0.3: 31 | 1.0.0.4: 32 | agent_publics: 33 | hosts: 34 | # Public IP Addresses for the Public Agent Nodes 35 | 1.0.0.5: 36 | vars: 37 | # IaaS target for DC/OS deployment 38 | # options: aws, gcp, azure or onprem 39 | dcos_iaas_target: 'onprem' 40 | 41 | # Choose the IP Detect Script 42 | # options: eth0, eth1, ... 
(or other device name for existing network interface) 43 | dcos_ip_detect_interface: 'eth0' 44 | 45 | # (internal/private) IP Address of the Bootstrap Node 46 | dcos_bootstrap_ip: '2.0.0.1' 47 | 48 | # (internal/private) IP Addresses for the Master Nodes 49 | dcos_master_list: 50 | - 2.0.0.2 51 | 52 | # DNS Resolvers 53 | dcos_resolvers: 54 | - 8.8.4.4 55 | - 8.8.8.8 56 | 57 | # DNS Search Domain 58 | dcos_dns_search: 'None' 59 | 60 | # Internal Loadbalancer DNS for Masters (only needed for exhibitor: aws_s3) 61 | dcos_exhibitor_address: 'masterlb.internal' 62 | 63 | # External Loadbalancer DNS for Masters or 64 | # (external/public) Master Node IP Address (only needed for cli setup) 65 | dcos_master_address: 'masterlb.external' 66 | ``` 67 | 68 | The setup variables for DC/OS are defined in the file `group_vars/all/vars`. Copy the example files, by running: 69 | 70 | ```shell 71 | $ cp group_vars/all.example group_vars/all 72 | ``` 73 | 74 | The now created file `group_vars/all` is for configuring DC/OS and common variables. The variables are explained within the files. 75 | 76 | ### Configure your ssh Keys 77 | 78 | Applying the Ansible playbook `ansible-playbook plays/access-onprem.yml` ([see doc](ACCESS_ONPREM.md)) to be able to access your cluster nodes via SSH. 
import re
import subprocess
import os

from ansible.errors import AnsibleActionFail

# Ansible exposes a shared `display` object when invoked from the CLI;
# fall back to a private Display instance when imported outside that context.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


def _version(v):
    """Parse the first dotted numeric version in *v* into an int tuple.

    '0.6.1' -> (0, 6, 1). Tolerates surrounding text and pre-release
    suffixes (e.g. 'dcoscli.version=0.6.1-dev'), which previously crashed
    the naive int() conversion, as did the empty string.

    Raises:
        AnsibleActionFail: if *v* contains no dotted numeric version.
    """
    match = re.search(r'\d+(?:\.\d+)+', v)
    if match is None:
        raise AnsibleActionFail(
            "Unable to parse a version number from {!r}".format(v))
    return tuple(map(int, match.group(0).split('.')))


def _dcos_path():
    """Return a copy of the environment with the CWD prepended to PATH.

    The dcos binary is downloaded into the playbook's working directory,
    so it must be discoverable ahead of any system-wide installation.
    """
    env = os.environ.copy()
    env["PATH"] = os.getcwd() + ':' + env["PATH"]
    display.vvv('dcos cli: path environment variable: {}'.format(env["PATH"]))
    return env


def ensure_dcos():
    """Check whether a compatible dcos cli (0.5.x / 0.6.x) is installed.

    Raises:
        AnsibleActionFail: if the CLI is missing, its version cannot be
            determined, or the version is outside the supported range.
    """
    try:
        r = subprocess.check_output(
            ['dcos', '--version'], env=_dcos_path()).decode()
    except (subprocess.CalledProcessError, OSError):
        # OSError covers the binary not being on PATH at all.
        raise AnsibleActionFail("DC/OS CLI is not installed!")

    raw_version = ''
    for line in r.strip().split('\n'):
        display.vvv(line)
        # partition() tolerates lines without '=' (split() would raise).
        k, _, v = line.partition('=')
        if k == 'dcoscli.version':
            raw_version = v

    if not raw_version:
        # Previously fell through to int('') and died with a bare ValueError.
        raise AnsibleActionFail(
            "Unable to determine DC/OS CLI version from output: {!r}".format(r))

    v = _version(raw_version)
    if v < (0, 5, 0):
        raise AnsibleActionFail(
            "DC/OS CLI 0.5.x is required, found {}".format(v))
    if v >= (0, 7, 0):
        raise AnsibleActionFail(
            "DC/OS CLI version > 0.7.x detected, may not work")
    display.vvv("dcos: all prerequisites seem to be in order")


def ensure_dcos_security():
    """Check the dcos[cli] security extension, installing it if missing.

    Raises:
        AnsibleActionFail: if the installed security CLI is older than 1.2.0.
    """
    try:
        r = subprocess.check_output(
            ['dcos', 'security', '--version'], env=_dcos_path()).decode()
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare `except:` so genuine programming errors
        # (NameError, KeyboardInterrupt, ...) are no longer swallowed.
        display.vvv("dcos security: not installed")
        install_dcos_security_cli()
        r = subprocess.check_output(
            ['dcos', 'security', '--version'], env=_dcos_path()).decode()

    # _version() extracts the dotted number even if the command prints
    # extra text around it.
    v = _version(r)
    if v < (1, 2, 0):
        raise AnsibleActionFail(
            "DC/OS Security CLI 1.2.x is required, found {}".format(v))

    display.vvv("dcos security: all prerequisites seem to be in order")


def install_dcos_security_cli():
    """Install the DC/OS Enterprise (security) CLI subcommand."""
    display.vvv("dcos security: installing cli")

    cmd = [
        'dcos', 'package', 'install', 'dcos-enterprise-cli', '--cli', '--yes'
    ]
    display.vvv(subprocess.check_output(cmd, env=_dcos_path()).decode())


def run_command(cmd, description='run command', stop_on_error=False, input=None):
    """Run *cmd* and return its combined stdout/stderr output.

    Args:
        cmd: argv list for the command.
        description: human-readable action name, used in the failure message.
        stop_on_error: if True, raise AnsibleActionFail on a non-zero exit.
        input: unused; kept only for interface compatibility with callers.

    Returns:
        The raw (bytes) output of the command, even when it failed and
        ``stop_on_error`` is False.

    Raises:
        AnsibleActionFail: on a non-zero exit code when ``stop_on_error``.
    """
    display.vvv("command: " + ' '.join(cmd))

    try:
        output = subprocess.check_output(
            cmd, env=_dcos_path(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        output = e.output
        if stop_on_error:
            raise AnsibleActionFail(
                'Failed to {}: {}'.format(description, e))

    return output
&& apt-get update 13 | ignore_errors: yes 14 | 15 | - name: attempt to install Python on Debian-based systems 16 | raw: test -e /usr/bin/apt-get && apt-get -y install python-simplejson python 17 | ignore_errors: yes 18 | 19 | - name: attempt to install Python on CentOS-based systems 20 | raw: test -e /usr/bin/yum && yum -y install python-simplejson python 21 | ignore_errors: yes 22 | 23 | - name: Create admin user group 24 | group: 25 | name: admin 26 | system: yes 27 | state: present 28 | 29 | - name: Ensure sudo is installed 30 | package: 31 | name: sudo 32 | state: present 33 | 34 | - name: Create Ansible user 35 | user: 36 | name: "{{ lookup('ini', 'remote_user section=defaults file=../ansible.cfg') }}" 37 | shell: /bin/bash 38 | comment: "Ansible management user" 39 | home: "/home/{{ lookup('ini', 'remote_user section=defaults file=../ansible.cfg') }}" 40 | createhome: yes 41 | 42 | - name: Add Ansible user to admin group 43 | user: 44 | name: "{{ lookup('ini', 'remote_user section=defaults file=../ansible.cfg') }}" 45 | groups: admin 46 | append: yes 47 | 48 | - name: Add authorized key 49 | authorized_key: 50 | user: "{{ lookup('ini', 'remote_user section=defaults file=../ansible.cfg') }}" 51 | state: present 52 | key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/ansible-dcos.pub') }}" 53 | 54 | - name: Copy sudoers file 55 | command: cp -f /etc/sudoers /etc/sudoers.tmp 56 | 57 | - name: Backup sudoers file 58 | command: cp -f /etc/sudoers /etc/sudoers.bak 59 | 60 | - name: Ensure admin group can sudo 61 | lineinfile: 62 | dest: /etc/sudoers.tmp 63 | state: present 64 | regexp: '^%admin' 65 | line: '%admin ALL=(ALL) NOPASSWD: ALL' 66 | when: ansible_os_family == 'Debian' 67 | 68 | - name: Ensure admin group can sudo 69 | lineinfile: 70 | dest: /etc/sudoers.tmp 71 | state: present 72 | regexp: '^%admin' 73 | insertafter: '^root' 74 | line: '%admin ALL=(ALL) NOPASSWD: ALL' 75 | when: ansible_os_family == 'RedHat' 76 | 77 | - name: Replace sudoers file 78 | 
shell: visudo -q -c -f /etc/sudoers.tmp && cp -f /etc/sudoers.tmp /etc/sudoers 79 | 80 | - name: Test Ansible user's access 81 | local_action: "shell ssh {{ lookup('ini', 'remote_user section=defaults file=../ansible.cfg') }}@{{ ansible_host }} 'sudo echo success'" 82 | become: False 83 | register: ansible_success 84 | 85 | - name: Remove Ansible SSH key from bootstrap user's authorized keys 86 | lineinfile: 87 | path: "{{ ansible_env.HOME }}/.ssh/authorized_keys" 88 | state: absent 89 | regexp: '^ssh-rsa AAAAB3N' 90 | when: ansible_success.stdout == "success" 91 | -------------------------------------------------------------------------------- /roles/package/kubernetes/tasks/kubernetes.yml: -------------------------------------------------------------------------------- 1 | # This playbook installs the Kubernetes package 2 | 3 | - name: Create a group for Kubernetes 4 | dcos_iam_group: 5 | gid: "{{ dcos_k8s_app_id }}-group" 6 | description: Permissions for Kubernetes 7 | state: present 8 | permissions: 9 | - rid: "dcos:mesos:master:framework:role:*" 10 | action: read 11 | - rid: "dcos:mesos:master:framework:role:{{ dcos_k8s_app_id }}-role" 12 | action: create 13 | - rid: "dcos:mesos:master:task:user:root" 14 | action: create 15 | - rid: "dcos:mesos:agent:task:user:root" 16 | action: create 17 | - rid: "dcos:mesos:master:reservation:role:{{ dcos_k8s_app_id }}-role" 18 | action: create 19 | - rid: "dcos:mesos:master:reservation:principal:{{ dcos_k8s_app_id }}" 20 | action: delete 21 | - rid: "dcos:mesos:master:volume:role:{{ dcos_k8s_app_id }}-role" 22 | action: create 23 | - rid: "dcos:mesos:master:volume:principal:{{ dcos_k8s_app_id }}" 24 | action: delete 25 | - rid: "dcos:service:marathon:marathon:services:/" 26 | action: create 27 | - rid: "dcos:service:marathon:marathon:services:/" 28 | action: delete 29 | - rid: "dcos:secrets:default:/{{ dcos_k8s_app_id }}/*" 30 | action: full 31 | - rid: "dcos:secrets:list:default:/{{ dcos_k8s_app_id }}" 32 | action: read 33 
| - rid: "dcos:adminrouter:ops:ca:rw" 34 | action: full 35 | - rid: "dcos:adminrouter:ops:ca:ro" 36 | action: full 37 | - rid: "dcos:mesos:master:framework:role:slave_public/{{ dcos_k8s_app_id }}-role" 38 | action: create 39 | - rid: "dcos:mesos:master:framework:role:slave_public/{{ dcos_k8s_app_id }}-role" 40 | action: read 41 | - rid: "dcos:mesos:master:reservation:role:slave_public/{{ dcos_k8s_app_id }}-role" 42 | action: create 43 | - rid: "dcos:mesos:master:volume:role:slave_public/{{ dcos_k8s_app_id }}-role" 44 | action: create 45 | - rid: "dcos:mesos:master:framework:role:slave_public" 46 | action: read 47 | - rid: "dcos:mesos:agent:framework:role:slave_public" 48 | action: read 49 | when: dcos_deploy_ee_package 50 | 51 | - name: Create service account for Kubernetes 52 | dcos_iam_serviceaccount: 53 | sid: "{{ dcos_k8s_app_id }}" 54 | description: "{{ dcos_k8s_app_id }} Service Account" 55 | secret_path: "{{ dcos_k8s_app_id }}/secret" 56 | groups: 57 | - "{{ dcos_k8s_app_id }}-group" 58 | state: present 59 | when: dcos_deploy_ee_package 60 | 61 | 62 | - name: ensure kubernetes is installed 63 | dcos_package: 64 | name: kubernetes 65 | app_id: "{{ dcos_k8s_app_id }}" 66 | version: "{{ dcos_k8s_package_version }}" 67 | state: present 68 | options: 69 | service: 70 | name: "{{ dcos_k8s_app_id }}" 71 | service_account: "{{ dcos_k8s_app_id }}" 72 | service_account_secret: "{{ dcos_k8s_app_id }}/secret" 73 | kubernetes: 74 | high_availability: true 75 | node_count: 3 76 | authorization_mode: "RBAC" 77 | public_node_count: 1 78 | when: dcos_deploy_ee_package 79 | 80 | - name: ensure kubernetes is installed 81 | dcos_package: 82 | name: kubernetes 83 | app_id: "{{ dcos_k8s_app_id }}" 84 | version: "{{ dcos_k8s_package_version }}" 85 | state: present 86 | options: 87 | service: 88 | name: "{{ dcos_k8s_app_id }}" 89 | kubernetes: 90 | high_availability: true 91 | node_count: 3 92 | public_node_count: 1 93 | when: not dcos_deploy_ee_package 
#!/bin/bash
# Emit a DC/OS fault-domain JSON document for the cloud provider this node
# runs on.  With no argument the provider is auto-detected by probing each
# cloud's metadata endpoint; it can be forced with --aws, --azure or --gcp.

AWS_URL="http://169.254.169.254/latest/dynamic/instance-identity/document"

AZURE_REGION_URL="http://169.254.169.254/metadata/instance/compute/location?api-version=2017-08-01&format=text"
AZURE_FD_URL="http://169.254.169.254/metadata/instance/compute/platformFaultDomain?api-version=2017-04-02&format=text"

GCP_METADATA_URL="http://metadata.google.internal/computeMetadata/v1/instance/zone"


function aws() {
    METADATA="$(curl -f -m3 $AWS_URL 2>/dev/null)"
    rc=$?
    if [ $rc -ne 0 ]; then
        echo "unable to fetch aws region/zone. URL $AWS_URL. Ret code $rc" >&2
        exit 1
    fi
    REGION=$(echo $METADATA | grep -Po "\"region\"\s+:\s+\"(.*?)\"" | cut -f2 -d: | tr -d ' \"')
    ZONE=$(echo $METADATA | grep -Po "\"availabilityZone\"\s+:\s+\"(.*?)\"" | cut -f2 -d: | tr -d ' \"')
    echo "{\"fault_domain\":{\"region\":{\"name\": \"aws/$REGION\"},\"zone\":{\"name\": \"aws/$ZONE\"}}}"
}

function azure() {
    REGION=$(curl -f -m3 -H Metadata:true "$AZURE_REGION_URL" 2>/dev/null)
    rc=$?
    if [ $rc -ne 0 ]; then
        echo "unable to fetch azure region. URL $AZURE_REGION_URL. Ret code $rc" >&2
        exit 1
    fi

    FAULT_DOMAIN=$(curl -f -m3 -H Metadata:true "$AZURE_FD_URL" 2>/dev/null)
    rc=$?
    if [ $rc -ne 0 ]; then
        echo "unable to fetch azure fault domain. URL $AZURE_FD_URL. Ret code $rc" >&2
        exit 1
    fi

    echo "{\"fault_domain\":{\"region\":{\"name\": \"azure/$REGION\"},\"zone\":{\"name\": \"azure/$FAULT_DOMAIN\"}}}"
}

function gcp() {
    BODY=$(curl -f -m3 -H "Metadata-Flavor: Google" "$GCP_METADATA_URL" 2>/dev/null)
    rc=$?
    if [ $rc -ne 0 ]; then
        echo "unable to fetch gcp metadata. URL $GCP_METADATA_URL. Ret code $rc" >&2
        exit 1
    fi

    # metadata returns "projects/<id>/zones/<zone>"; region is zone minus suffix
    ZONE=$(echo "$BODY" | sed 's@^projects/.*/zones/\(.*\)$@\1@')
    REGION=$(echo "$ZONE" | sed 's@\(.*-.*\)-.*@\1@')

    echo "{\"fault_domain\":{\"region\":{\"name\": \"gcp/$REGION\"},\"zone\":{\"name\": \"gcp/$ZONE\"}}}"
}

function main() {
    if [ $# -eq 1 ]; then
        case $1 in
            --aws) aws; exit 0;;
            --azure) azure; exit 0;;
            --gcp) gcp; exit 0;;
        esac
        echo "invalid parameter $1. Must be one of --aws, --azure or --gcp"
        exit 1
    fi

    # declare PROVIDERS as an empty array
    PROVIDERS=()

    # try aws first
    curl -f -q -m1 "$AWS_URL" >/dev/null 2>&1
    if [ $? -eq 0 ]; then
        PROVIDERS+=("aws")
    fi

    # try azure
    curl -f -q -m1 -H 'Metadata:true' "$AZURE_REGION_URL" >/dev/null 2>&1
    if [ $? -eq 0 ]; then
        PROVIDERS+=("azure")
    fi

    # try gcp
    # BUG FIX: was `-q m1` (missing dash); curl treated "m1" as an extra URL
    # and the probe ran without any timeout
    curl -f -q -m1 -H "Metadata-Flavor: Google" "$GCP_METADATA_URL" >/dev/null 2>&1
    if [ $? -eq 0 ]; then
        PROVIDERS+=("gcp")
    fi

    if [ ${#PROVIDERS[@]} -eq 0 ]; then
        # BUG FIX: the message was missing `echo` and was executed as a command
        echo "ERROR: unable to detect cloud provider. Use explicit parameter --aws, --azure, or --gcp" >&2
        exit 1
    fi

    if [ ${#PROVIDERS[@]} -gt 1 ]; then
        echo "ERROR: found multiple cloud providers: ${PROVIDERS[@]}" >&2
        exit 1
    fi

    provider=${PROVIDERS[0]}
    case $provider in
        "aws") aws; exit 0;;
        "gcp") gcp; exit 0;;
        "azure") azure; exit 0;;
        *) echo "ERROR: Unknown cloud provider $provider" >&2; exit 1;;
    esac
}

# quote "$@" so arguments containing spaces survive word splitting
main "$@"
14 | * Added yum-utils to common tasks 15 | * Bumps docker version to 17.06.2.ce 16 | * Removes Docker live restore because of issues with MESOS-6480 17 | 18 | ## v0.6.1-dcos-1.11 19 | 20 | * Update Kubernetes framework to GA 21 | * Bump Kubernetes version to v1.9.6 22 | * Add Kubernetes upgrade doc 23 | * Other docs improvements 24 | * Major refactoring around ansible variables and removal of code duplication 25 | 26 | ## v0.6.0-dcos-1.11 27 | 28 | * Removing Terraform and referencing https://github.com/dcos/terraform-dcos for setting up the infrastructure 29 | * Adds support for GCP and Azure 30 | * Enables IPv6 31 | * Support of Fault Domain Awareness 32 | * Support of License Keys (in DC/OS Enterprise 1.11) 33 | * Bumps Docker version to 17.05.0.ce 34 | * Adopts Makefile approach for easy setup 35 | * Docs improvements 36 | * #8 On-Premises: added possibility to use other device names for existing network interface 37 | 38 | ## v0.5.0-dcos-1.10 39 | 40 | * Installs and disables dnsmasq 41 | * Disables source/dest for AWS instances check in order to get CNI/Calico working properly 42 | * Tested with DC/OS 1.10.2 43 | * Migrated repo to https://github.com/dcos-labs/terraform-ansible-dcos 44 | 45 | ## v0.4.0-alpha 46 | 47 | * #5 Adds Dynamic Inventory to read from Terraform state 48 | * Simplified directory structure for variables 49 | * Moves Docker to it's own Ansible role, set sane defaults and makes the version configurable 50 | * Installs firewalld in order to proper disable it afterwards 51 | 52 | ## v0.3.0-alpha 53 | 54 | * Tested with DC/OS 1.10 55 | * Updated configuration for rexray 0.9.0 56 | * Removed rarely used scripts and plays 57 | * Adds support for public ip detection on aws 58 | * Fixes uninstall script and sets temporary nameserver 59 | * Fix for #8 Default bootstrap folder is not part of /tmp 60 | * Terraform: New AMIs for Centos 7.3 61 | 62 | ## v0.2.1-alpha 63 | 64 | * Tested with DC/OS 1.9 65 | * Install Docker 1.13 66 | * Improved 
documentation 67 | 68 | ## v0.2.0-alpha 69 | 70 | * Terraform 71 | * Restructure Terraform with modules 72 | * Support for Availability Zones 73 | * Spread Private Agents across different AZ 74 | * Keep Masters in the same AZ 75 | * Create Internal LoadBalancer for Masters 76 | * Create External LoadBalancer for Masters 77 | * Create External LoadBalancer for Public Agents 78 | * Put in name prefix in front of every AWS entity 79 | * Adds Security Groups for each Roles instances, elbs 80 | * Delete EBS volumes after instance termination 81 | 82 | * Ansible 83 | * Read Availability Zone and set Mesos attribute for each agent 84 | * Support for internal Master LoadBalancer 85 | * Adds fix for DCOS-12332 Certificate did not match expected hostname CERT, ELB master_http_loadbalancer 86 | * Use prefix for the file name inside of S3 Bucket 87 | * Removed support for exhibitor backends: NFS and ZooKeeper 88 | * Preview for service deployment: Marathon-LB 89 | * Preview for DC/OS configuration: LDAP 90 | * Preview of automated upgrades for DC/OS 1.9 using the new upgrade API 91 | 92 | ## v0.1.0-alpha 93 | 94 | * Tested with DC/OS 1.8 95 | * Now works with Enterprise or OSS 96 | * Parameterized setup file for Ansible (custom download location, cluster name, etc) 97 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | RM := rm -f 2 | TERRAFORM_INSTALLER_URL := github.com/dcos/terraform-dcos 3 | 4 | # Set PATH to include local dir for locally downloaded binaries. 5 | export PATH := .:$(PATH) 6 | 7 | # Get the path to relvant binaries. 
8 | TERRAFORM_CMD := $(shell command -v terraform 2> /dev/null) 9 | ANSIBLE_CMD := $(shell command -v ansible 2> /dev/null) 10 | PYTHON3_CMD := $(shell command -v python3 2> /dev/null) 11 | TERRAFORM_APPLY_ARGS ?= 12 | TERRAFORM_DESTROY_ARGS ?= 13 | 14 | UNAME := $(shell uname -s) 15 | ifeq ($(UNAME),Linux) 16 | OPEN := xdg-open 17 | else 18 | OPEN := open 19 | endif 20 | 21 | # Define a new line character to use in error strings. 22 | define n 23 | 24 | endef 25 | 26 | .PHONY: check-terraform 27 | check-terraform: 28 | ifndef TERRAFORM_CMD 29 | $(error "$n$nNo terraform command in $(PATH).$n$nPlease install via 'brew install terraform' on MacOS, or download from https://www.terraform.io/downloads.html.$n$n") 30 | endif 31 | 32 | .PHONY: check-ansible 33 | check-ansible: 34 | ifndef ANSIBLE_CMD 35 | $(error "$n$nNo ansible command in $(PATH).$n$nPlease install via 'brew install ansible' on MacOS, or download from http://docs.ansible.com/ansible/latest/intro_installation.html.$n$n") 36 | endif 37 | 38 | .PHONY: check-python3 39 | check-python3: 40 | ifndef PYTHON3_CMD 41 | $(error "$n$nNo python3 command in $(PATH).$n$nPlease install via 'brew install python3' on MacOS.$n$n") 42 | endif 43 | 44 | .PHONY: azure 45 | azure: clean check-terraform 46 | mkdir .deploy 47 | cd .deploy; \ 48 | $(TERRAFORM_CMD) init -from-module $(TERRAFORM_INSTALLER_URL)/azure; \ 49 | cp ../resources/desired_cluster_profile.azure desired_cluster_profile; \ 50 | cp ../resources/override.azure.tf override.tf; \ 51 | ../scripts/kubeapi-proxy-azure.sh; \ 52 | rm -f desired_cluster_profile.tfvars.example 53 | 54 | .PHONY: aws 55 | aws: clean check-terraform 56 | mkdir .deploy 57 | cd .deploy; \ 58 | $(TERRAFORM_CMD) init -from-module $(TERRAFORM_INSTALLER_URL)/aws; \ 59 | cp ../resources/desired_cluster_profile.aws desired_cluster_profile; \ 60 | cp ../resources/override.aws.tf override.tf; \ 61 | ../scripts/kubeapi-proxy-aws.sh; \ 62 | rm -f desired_cluster_profile.tfvars.example 63 | 64 | 
.PHONY: gcp 65 | gcp: clean check-terraform 66 | mkdir .deploy 67 | cd .deploy; \ 68 | $(TERRAFORM_CMD) init -from-module $(TERRAFORM_INSTALLER_URL)/gcp; \ 69 | cp ../resources/desired_cluster_profile.gcp desired_cluster_profile; \ 70 | cp ../resources/override.gcp.tf override.tf; \ 71 | ../scripts/kubeapi-proxy-gcp.sh; \ 72 | rm -f desired_cluster_profile.tfvars.example 73 | 74 | .PHONY: install-k8s 75 | install-k8s: check-ansible 76 | ansible-playbook -i inventory.py plays/kubernetes.yml 77 | 78 | .PHONY: plan-infra 79 | plan-infra: check-terraform 80 | cd .deploy; \ 81 | $(TERRAFORM_CMD) plan -var-file desired_cluster_profile -var state=none 82 | 83 | .PHONY: launch-infra 84 | launch-infra: check-terraform 85 | cd .deploy; \ 86 | $(TERRAFORM_CMD) apply -var-file desired_cluster_profile -var state=none 87 | 88 | .PHONY: destroy-infra 89 | destroy-infra: check-terraform 90 | cd .deploy; \ 91 | $(TERRAFORM_CMD) destroy $(TERRAFORM_DESTROY_ARGS) -var-file desired_cluster_profile 92 | 93 | .PHONY: ansible-ping 94 | ansible-ping: check-python3 check-ansible 95 | ansible all -i inventory.py -m ping 96 | 97 | .PHONY: ansible-install 98 | ansible-install: check-python3 check-ansible ansible-ping 99 | ansible-playbook -i inventory.py plays/install.yml 100 | 101 | .PHONY: ansible-uninstall 102 | ansible-uninstall: check-python3 check-ansible ansible-ping 103 | ansible-playbook -i inventory.py plays/uninstall.yml 104 | 105 | .PHONY: plan 106 | plan: plan-infra 107 | 108 | .PHONY: deploy 109 | deploy: launch-infra ansible-install 110 | 111 | .PHONY: ui 112 | ui: 113 | cd .deploy; \ 114 | $(OPEN) https://`terraform output "lb_external_masters"` 115 | 116 | .PHONY: public-lb 117 | public-lb: 118 | cd .deploy; \ 119 | $(OPEN) http://`terraform output "lb_external_agents"` 120 | 121 | .PHONY: uninstall 122 | uninstall: ansible-uninstall 123 | 124 | .PHONY: destroy 125 | destroy: destroy-infra 126 | 127 | .PHONY: clean 128 | clean: 129 | $(RM) -r .deploy dcos kubectl 130 | 
-------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_iam_group.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ( 22 | ensure_dcos, 23 | ensure_dcos_security, 24 | run_command, 25 | _dcos_path 26 | ) 27 | 28 | try: 29 | from __main__ import display 30 | except ImportError: 31 | from ansible.utils.display import Display 32 | display = Display() 33 | 34 | def get_group_state(gid): 35 | """Get the current state of a group.""" 36 | 37 | r = subprocess.check_output([ 38 | 'dcos', 39 | 'security', 40 | 'org', 41 | 'groups', 42 | 'show', 43 | '--json' 44 | ], 45 | env=_dcos_path() 46 | ) 47 | groups = json.loads(r) 48 | 49 | display.vvv('looking for gid {}'.format(gid)) 50 | 51 | state = 'absent' 52 | for g in groups: 53 | try: 54 | if gid in g: 55 | state = 'present' 56 | display.vvv('found app: {}'.format(gid)) 57 | 58 | except KeyError: 59 | continue 60 | return state 61 | 62 | def group_create(gid, description): 63 | """Create a group""" 64 | display.vvv("DC/OS: IAM create group {}".format(gid)) 65 | 66 | cmd = [ 67 | 'dcos', 68 | 'security', 69 | 'org', 70 | 'groups', 71 | 'create', 72 | '--description', 73 | '\'' + description + '\'', 74 | gid, 75 | ] 76 | run_command(cmd, 'create group', stop_on_error=True) 77 | 78 | def group_update(gid, permissions): 79 | """Update group permissions""" 80 
| display.vvv("DC/OS: IAM update group {}".format(gid)) 81 | 82 | for p in permissions: 83 | display.vvv("Granting {} permission on {} to group {}".format( 84 | p['rid'], p['action'], gid)) 85 | 86 | cmd = [ 87 | 'dcos', 88 | 'security', 89 | 'org', 90 | 'groups', 91 | 'grant', 92 | gid, 93 | p['rid'], 94 | p['action'] 95 | ] 96 | run_command(cmd, 'update group', stop_on_error=False) 97 | 98 | def group_delete(gid): 99 | """Delete a group""" 100 | display.vvv("DC/OS: IAM delete group {}".format(gid)) 101 | 102 | cmd = [ 103 | 'dcos', 104 | 'security', 105 | 'org', 106 | 'groups', 107 | 'delete', 108 | gid, 109 | ] 110 | run_command(cmd, 'delete group', stop_on_error=True) 111 | 112 | class ActionModule(ActionBase): 113 | def run(self, tmp=None, task_vars=None): 114 | 115 | result = super(ActionModule, self).run(tmp, task_vars) 116 | del tmp # tmp no longer has any effect 117 | 118 | if self._play_context.check_mode: 119 | # in --check mode, always skip this module execution 120 | result['skipped'] = True 121 | result['msg'] = 'The dcos task does not support check mode' 122 | return result 123 | 124 | args = self._task.args 125 | gid = args.get('gid') 126 | description = args.get('description', 'Created by Ansible') 127 | permissions = args.get('permissions', []) 128 | wanted_state = args.get('state', 'present') 129 | 130 | if gid is None: 131 | raise AnsibleActionFail('gid cannot be empty for dcos_iam_group') 132 | 133 | ensure_dcos() 134 | ensure_dcos_security() 135 | 136 | current_state = get_group_state(gid) 137 | 138 | if current_state == wanted_state: 139 | 140 | display.vvv( 141 | "DC/OS IAM group {} already in desired state {}".format(gid, wanted_state)) 142 | 143 | if wanted_state == "present": 144 | group_update(gid, permissions) 145 | 146 | result['changed'] = False 147 | else: 148 | display.vvv("DC/OS: IAM group {} not in desired state {}".format(gid, wanted_state)) 149 | 150 | if wanted_state != 'absent': 151 | group_create(gid, description) 152 | 
group_update(gid, permissions) 153 | 154 | else: 155 | group_delete(gid) 156 | 157 | result['changed'] = True 158 | 159 | return result 160 | -------------------------------------------------------------------------------- /docs/INSTALL_AZURE.md: -------------------------------------------------------------------------------- 1 | # Steps for DC/OS installation with Terraform and Ansible on Azure 2 | 3 | With the following guide, you are able to install a DC/OS cluster on Azure. You need the tools Terraform and Ansible installed. On MacOS, you can use [brew](https://brew.sh/) for that. 4 | 5 | ```shell 6 | $ brew install terraform 7 | $ brew install ansible 8 | ``` 9 | 10 | ## Setup infrastructure 11 | 12 | ### Pull down the DC/OS Terraform scripts below 13 | 14 | ```shell 15 | $ make azure 16 | ``` 17 | 18 | ### Configure your Azure ssh Keys 19 | 20 | Set the private key that you will be you will be using to your ssh-agent and set public key in terraform. 21 | 22 | ```shell 23 | $ ssh-add ~/.ssh/your_private_azure_key.pem 24 | ``` 25 | 26 | Add your Azure ssh key to `.deploy/desired_cluster_profile` file: 27 | ``` 28 | ssh_pub_key = "INSERT_AZURE_PUBLIC_KEY_HERE" 29 | ``` 30 | 31 | ### Configure your Azure ID Keys 32 | 33 | Follow the Terraform instructions [here](https://www.terraform.io/docs/providers/azurerm/#creating-credentials) to setup your Azure credentials to provide to terraform. 34 | 35 | When you've successfully retrieved your output of `az account list`, create a source file to easily run your credentials in the future. 
36 | 37 | 38 | ```shell 39 | $ cat ~/.azure/credentials 40 | export ARM_TENANT_ID=45ef06c1-a57b-40d5-967f-88cf8example 41 | export ARM_CLIENT_SECRET=Lqw0kyzWXyEjfha9hfhs8dhasjpJUIGQhNFExAmPLE 42 | export ARM_CLIENT_ID=80f99c3a-cd7d-4931-9405-8b614example 43 | export ARM_SUBSCRIPTION_ID=846d9e22-a320-488c-92d5-41112example 44 | ``` 45 | 46 | ### Source Credentials 47 | 48 | Set your environment variables by sourcing the files before you run any terraform commands. 49 | 50 | ```shell 51 | $ source ~/.azure/credentials 52 | ``` 53 | 54 | ### Terraform deployment 55 | 56 | The setup variables for Terraform are defined in the file `.deploy/desired_cluster_profile`. You can make a change to the file and it will persist when you do other commands to your cluster in the future. 57 | 58 | For example, you can see the default configuration of your cluster: 59 | 60 | ```shell 61 | $ cat .deploy/desired_cluster_profile 62 | os = "centos_7.3" 63 | state = "none" 64 | # 65 | num_of_masters = "1" 66 | num_of_private_agents = "3" 67 | num_of_public_agents = "1" 68 | # 69 | azure_region = "East US 2" 70 | azure_bootstrap_instance_type = "Standard_DS1_v2" 71 | azure_master_instance_type = "Standard_D4_v2" 72 | azure_agent_instance_type = "Standard_D4_v2" 73 | azure_public_agent_instance_type = "Standard_D4_v2" 74 | # Inbound Master Access 75 | admin_cidr = "0.0.0.0/0" 76 | 77 | ssh_pub_key = "INSERT_AZURE_PUBLIC_KEY_HERE" 78 | ``` 79 | 80 | You can plan the profile with Terraform while referencing: 81 | 82 | ```shell 83 | $ make plan 84 | ``` 85 | 86 | If you are happy with the changes, the you can apply the profile with Terraform while referencing: 87 | 88 | ```shell 89 | $ make launch-infra 90 | ``` 91 | 92 | ## Install DC/OS 93 | 94 | Once the components are created, we can run the Ansible script to install DC/OS on the instances. 95 | 96 | The setup variables for DC/OS are defined in the file `group_vars/all/vars`. 
Copy the example files, by running: 97 | 98 | ```shell 99 | $ cp group_vars/all.example group_vars/all 100 | ``` 101 | 102 | The now created file `group_vars/all` is for configuring DC/OS and common variables. The variables are explained within the files. 103 | 104 | Optionally you can change the exhibitor backend to `azure`. So the master discovery is done by using Azure shared storage. For that you have to fill in the Azure Storage Account Name and secret key: 105 | 106 | ``` 107 | # Optional if dcos_iaas_target := azure 108 | dcos_exhibitor: 'azure' 109 | dcos_exhibitor_azure_account_name: 'name' 110 | dcos_exhibitor_azure_account_key: '******' 111 | ``` 112 | 113 | Ansible also needs to know how to find the instances that got created via Terraform. For that we run a dynamic inventory script called `./inventory.py`. To use it specify the script with the parameter `-i`. In example, check that all instances are reachable via Ansible: 114 | 115 | ```shell 116 | $ ansible all -i inventory.py -m ping 117 | ``` 118 | 119 | Finally, you can install DC/OS by running: 120 | 121 | ```shell 122 | $ ansible-playbook -i inventory.py plays/install.yml 123 | ``` 124 | 125 | ## Access the cluster 126 | 127 | If the installation was successful. You should be able to reach the Master load balancer. You can find the URL of the Master LB with the following command: 128 | 129 | ```shell 130 | $ make ui 131 | ``` 132 | 133 | The terraform script also created a load balancer for the public agents: 134 | 135 | ```shell 136 | $ make public-lb 137 | ``` 138 | 139 | ## Destroy the cluster 140 | 141 | To delete the Azure stack run the command: 142 | 143 | ```shell 144 | $ make destroy 145 | ``` 146 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_iam_user.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 
3 | Uses the Ansible host to connect directly to DC/OS. 4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ( 22 | ensure_dcos, 23 | ensure_dcos_security, 24 | run_command, 25 | _dcos_path 26 | ) 27 | 28 | try: 29 | from __main__ import display 30 | except ImportError: 31 | from ansible.utils.display import Display 32 | display = Display() 33 | 34 | def get_user_state(uid): 35 | """Get the current state of a user.""" 36 | 37 | r = subprocess.check_output([ 38 | 'dcos', 39 | 'security', 40 | 'org', 41 | 'users', 42 | 'show', 43 | '--json' 44 | ], 45 | env=_dcos_path() 46 | ) 47 | users = json.loads(r) 48 | 49 | display.vvv('looking for uid {}'.format(uid)) 50 | 51 | state = 'absent' 52 | for g in users: 53 | try: 54 | if uid in g: 55 | state = 'present' 56 | display.vvv('found uid: {}'.format(uid)) 57 | 58 | except KeyError: 59 | continue 60 | return state 61 | 62 | def user_create(uid, password, description): 63 | """Create a user""" 64 | display.vvv("DC/OS: IAM create user {}".format(uid)) 65 | 66 | cmd = [ 67 | 'dcos', 68 | 'security', 69 | 'org', 70 | 'users', 71 | 'create', 72 | uid, 73 | '--description', 74 | description, 75 | '--password', 76 | password 77 | ] 78 | run_command(cmd, 'create user', stop_on_error=True) 79 | 80 | def user_update(uid, groups): 81 | """Update user groups""" 82 | display.vvv("DC/OS: IAM update user {}".format(uid)) 83 | 84 | for g in groups: 85 | display.vvv("Assigning user {} to group {}".format( 86 | uid,g)) 87 | 88 | cmd = [ 89 | 'dcos', 90 | 'security', 91 | 'org', 92 | 'groups', 93 | 'add_user', 94 | g, 95 | 
uid 96 | ] 97 | run_command(cmd, 'update user', stop_on_error=False) 98 | 99 | def user_delete(uid): 100 | """Delete a user""" 101 | display.vvv("DC/OS: IAM delete user {}".format(uid)) 102 | 103 | cmd = [ 104 | 'dcos', 105 | 'security', 106 | 'org', 107 | 'users', 108 | 'delete', 109 | uid, 110 | ] 111 | run_command(cmd, 'delete user', stop_on_error=True) 112 | 113 | class ActionModule(ActionBase): 114 | def run(self, tmp=None, task_vars=None): 115 | 116 | result = super(ActionModule, self).run(tmp, task_vars) 117 | del tmp # tmp no longer has any effect 118 | 119 | if self._play_context.check_mode: 120 | # in --check mode, always skip this module execution 121 | result['skipped'] = True 122 | result['msg'] = 'The dcos task does not support check mode' 123 | return result 124 | 125 | args = self._task.args 126 | uid = args.get('uid') 127 | description = args.get('description', 'Created by Ansible') 128 | password = args.get('password') 129 | groups = args.get('groups', []) 130 | wanted_state = args.get('state', 'present') 131 | 132 | if uid is None: 133 | raise AnsibleActionFail('uid cannot be empty for dcos_iam_user') 134 | 135 | if password is None: 136 | raise AnsibleActionFail('password cannot be empty for dcos_iam_user') 137 | 138 | ensure_dcos() 139 | ensure_dcos_security() 140 | 141 | current_state = get_user_state(uid) 142 | 143 | if current_state == wanted_state: 144 | 145 | display.vvv( 146 | "DC/OS IAM user {} already in desired state {}".format(uid, wanted_state)) 147 | 148 | if wanted_state == "present": 149 | user_update(uid, groups) 150 | 151 | result['changed'] = False 152 | else: 153 | display.vvv("DC/OS: IAM user {} not in desired state {}".format(uid, wanted_state)) 154 | 155 | if wanted_state != 'absent': 156 | user_create(uid, password, description) 157 | user_update(uid, groups) 158 | 159 | else: 160 | user_delete(uid) 161 | 162 | result['changed'] = True 163 | 164 | return result 165 | 
-------------------------------------------------------------------------------- /docs/INSTALL_GCP.md: -------------------------------------------------------------------------------- 1 | # Steps for DC/OS installation with Terraform and Ansible on GCP 2 | 3 | With the following guide, you are able to install a DC/OS cluster on GCP. You need the tools Terraform and Ansible installed. On MacOS, you can use [brew](https://brew.sh/) for that. 4 | 5 | ```shell 6 | $ brew install terraform 7 | $ brew install ansible 8 | ``` 9 | 10 | ## Setup infrastructure 11 | 12 | ### Prerequisites 13 | - [Terraform 0.11.x](https://www.terraform.io/downloads.html) 14 | - GCP Cloud Credentials. _[configure via: `gcloud auth login`](https://cloud.google.com/sdk/downloads)_ 15 | - SSH Key 16 | - Existing Google Project. 17 | 18 | ### Install Google SDK 19 | 20 | Run this command to authenticate to the Google Provider. This will bring down your keys locally on the machine for terraform to use. 21 | 22 | ```shell 23 | $ gcloud auth login 24 | $ gcloud auth application-default login 25 | ``` 26 | 27 | ### Pull down the DC/OS Terraform scripts below 28 | 29 | ```shell 30 | $ make gcp 31 | ``` 32 | 33 | ### Configure your GCP ssh keys 34 | 35 | Set the public key that you will be using to your ssh-agent and set public key in terraform. This will allow you to log in to the cluster after DC/OS is deployed and also helps Terraform setup your cluster at deployment time. 36 | 37 | ```shell 38 | $ ssh-add ~/.ssh/google_compute_engine.pub 39 | ``` 40 | 41 | Add your ssh key to `.deploy/desired_cluster_profile` file: 42 | ``` 43 | gcp_ssh_pub_key_file = "INSERT_PUBLIC_KEY_PATH_HERE" 44 | ``` 45 | 46 | ### Configure a Pre-existing GCP Project 47 | 48 | ansible-dcos assumes a project already exists in GCP to start deploying your resources against.
49 | 50 | Add your GCP project to `.deploy/desired_cluster_profile` file: 51 | ``` 52 | gcp_project = "massive-bliss-781" 53 | ``` 54 | 55 | ### Terraform deployment 56 | 57 | The setup variables for Terraform are defined in the file `.deploy/desired_cluster_profile`. You can make a change to the file and it will persist when you do other commands to your cluster in the future. 58 | 59 | For example, you can see the default configuration of your cluster: 60 | 61 | ```shell 62 | $ cat .deploy/desired_cluster_profile 63 | os = "centos_7.3" 64 | state = "none" 65 | # 66 | num_of_masters = "1" 67 | num_of_private_agents = "3" 68 | num_of_public_agents = "1" 69 | # 70 | gcp_project = "YOUR_GCP_PROJECT" 71 | gcp_region = "us-central1" 72 | gcp_ssh_pub_key_file = "/PATH/YOUR_GCP_SSH_PUBLIC_KEY.pub" 73 | # 74 | # If you want to use GCP service account key instead of GCP SDK 75 | # uncomment the line below and update it with the path to the key file 76 | #gcp_credentials_key_file = "/PATH/YOUR_GCP_SERVICE_ACCOUNT_KEY.json" 77 | # 78 | gcp_bootstrap_instance_type = "n1-standard-1" 79 | gcp_master_instance_type = "n1-standard-8" 80 | gcp_agent_instance_type = "n1-standard-8" 81 | gcp_public_agent_instance_type = "n1-standard-8" 82 | # 83 | # Change public/private subnetworks e.g. "10.65." if you want to run multiple clusters in the same project 84 | gcp_compute_subnetwork_public = "10.64.0.0/22" 85 | gcp_compute_subnetwork_private = "10.64.4.0/22" 86 | # Inbound Master Access 87 | admin_cidr = "0.0.0.0/0" 88 | ``` 89 | 90 | You can plan the profile with Terraform while referencing: 91 | 92 | ```shell 93 | $ make plan 94 | ``` 95 | 96 | If you are happy with the changes, then you can apply the profile with Terraform while referencing: 97 | 98 | ```shell 99 | $ make launch-infra 100 | ``` 101 | 102 | ## Install DC/OS 103 | 104 | Once the components are created, we can run the Ansible script to install DC/OS on the instances. 
105 | 106 | The setup variables for DC/OS are defined in the file `group_vars/all/vars`. Copy the example files, by running: 107 | 108 | ```shell 109 | $ cp group_vars/all.example group_vars/all 110 | ``` 111 | 112 | The now created file `group_vars/all` is for configuring DC/OS and common variables. The variables are explained within the files. 113 | 114 | Ansible also needs to know how to find the instances that got created via Terraform. For that we run a dynamic inventory script called `./inventory.py`. To use it specify the script with the parameter `-i`. In example, check that all instances are reachable via Ansible: 115 | 116 | ```shell 117 | $ ansible all -i inventory.py -m ping 118 | ``` 119 | 120 | Finally, you can install DC/OS by running: 121 | 122 | ```shell 123 | $ ansible-playbook -i inventory.py plays/install.yml 124 | ``` 125 | 126 | ## Access the cluster 127 | 128 | If the installation was successful. You should be able to reach the Master load balancer. You can find the URL of the Master LB with the following command: 129 | 130 | ```shell 131 | $ make ui 132 | ``` 133 | 134 | The terraform script also created a load balancer for the public agents: 135 | 136 | ```shell 137 | $ make public-lb 138 | ``` 139 | 140 | ## Destroy the cluster 141 | 142 | To delete the GCP stack run the command: 143 | 144 | ```shell 145 | $ make destroy 146 | ``` 147 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_secret.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 
4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ( 22 | ensure_dcos, 23 | ensure_dcos_security, 24 | run_command, 25 | _dcos_path 26 | ) 27 | 28 | try: 29 | from __main__ import display 30 | except ImportError: 31 | from ansible.utils.display import Display 32 | display = Display() 33 | 34 | def get_secret_value(path, store): 35 | """Get the current value of a secret.""" 36 | 37 | display.vvv('looking for secret {} '.format(path)) 38 | 39 | value = None 40 | try: 41 | r = subprocess.check_output([ 42 | 'dcos', 43 | 'security', 44 | 'secrets', 45 | 'get', 46 | '--store-id', 47 | store, 48 | '--json', 49 | path 50 | ], 51 | env=_dcos_path(), 52 | stderr=subprocess.STDOUT 53 | ) 54 | value = json.loads(r)['value'] 55 | display.vvv('secret {} has value {}'.format(path, value)) 56 | except: 57 | value = None 58 | 59 | return value 60 | 61 | def secret_create(path, value, store): 62 | """Create a secret""" 63 | 64 | display.vvv("DC/OS: create secret {} with {}".format(path, value)) 65 | 66 | cmd = [ 67 | 'dcos', 68 | 'security', 69 | 'secrets', 70 | 'create', 71 | '--store-id', 72 | store, 73 | '--value', 74 | value, 75 | path 76 | ] 77 | run_command(cmd, 'create secret', stop_on_error=True) 78 | 79 | def secret_update(path, value, store): 80 | """Update a secret""" 81 | 82 | display.vvv("DC/OS: update secret {} with {}".format(path, value)) 83 | 84 | cmd = [ 85 | 'dcos', 86 | 'security', 87 | 'secrets', 88 | 'update', 89 | '--store-id', 90 | store, 91 | '--value', 92 | value, 93 | path 94 | ] 95 | run_command(cmd, 'update secret', stop_on_error=True) 
96 | 97 | def secret_delete(path, store): 98 | """Delete a secret""" 99 | 100 | display.vvv("DC/OS: delete secret {}".format(path)) 101 | 102 | cmd = [ 103 | 'dcos', 104 | 'security', 105 | 'secrets', 106 | 'delete', 107 | '--store-id', 108 | store, 109 | path 110 | ] 111 | run_command(cmd, 'delete secret', stop_on_error=True) 112 | 113 | class ActionModule(ActionBase): 114 | def run(self, tmp=None, task_vars=None): 115 | 116 | result = super(ActionModule, self).run(tmp, task_vars) 117 | del tmp # tmp no longer has any effect 118 | 119 | if self._play_context.check_mode: 120 | # in --check mode, always skip this module execution 121 | result['skipped'] = True 122 | result['msg'] = 'The dcos task does not support check mode' 123 | return result 124 | 125 | args = self._task.args 126 | path = args.get('path') 127 | if path is None: 128 | raise AnsibleActionFail('path cannot be empty for dcos_secret') 129 | store = args.get('store', 'default') 130 | value = args.get('value') 131 | wanted_state = args.get('state', 'present') 132 | 133 | ensure_dcos() 134 | ensure_dcos_security() 135 | 136 | current_value = get_secret_value(path, store) 137 | 138 | current_state = 'present' if current_value is not None else 'absent' 139 | 140 | if current_state == wanted_state: 141 | 142 | display.vvv( 143 | "DC/OS Secret {} already in desired state {}".format(path, wanted_state)) 144 | result['changed'] = False 145 | 146 | if wanted_state == "present" and current_value != value: 147 | secret_update(path, value, store) 148 | result['changed'] = True 149 | result['msg'] = "Secret {} was updated".format(path) 150 | 151 | else: 152 | display.vvv("DC/OS Secret {} not in desired state {}".format(path, wanted_state)) 153 | 154 | if wanted_state != 'absent': 155 | secret_create(path, value, store) 156 | result['msg'] = "Secret {} was created".format(path) 157 | 158 | else: 159 | secret_delete(path, store) 160 | result['msg'] = "Secret {} was deleted".format(path) 161 | 162 | result['changed'] 
= True 163 | 164 | return result 165 | -------------------------------------------------------------------------------- /docs/INSTALL_AWS.md: -------------------------------------------------------------------------------- 1 | # Steps for DC/OS installation with Terraform and Ansible on AWS 2 | 3 | With the following guide, you are able to install a DC/OS cluster on AWS. You need the tools Terraform and Ansible installed. On MacOS, you can use [brew](https://brew.sh/) for that. 4 | 5 | ```shell 6 | $ brew install terraform 7 | $ brew install ansible 8 | ``` 9 | 10 | ## Setup infrastructure 11 | 12 | ### Pull down the DC/OS Terraform scripts below 13 | 14 | ```shell 15 | $ make aws 16 | ``` 17 | 18 | ### Configure your AWS ssh Keys 19 | 20 | In the file `.deploy/desired_cluster_profile` there is a `key_name` variable. This key must be added to your host machine running your terraform script as it will be used to log into the machines to run setup scripts. The default is `default`. You can find aws documentation that talks about this [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#how-to-generate-your-own-key-and-import-it-to-aws). 21 | 22 | When you have your key available, you can use ssh-add. 23 | 24 | ```shell 25 | $ ssh-add ~/.ssh/path_to_you_key.pem 26 | ``` 27 | 28 | ### Configure your IAM AWS Keys 29 | 30 | You will need your AWS aws_access_key_id and aws_secret_access_key. If you dont have one yet, you can get them from the AWS documentation [here]( 31 | http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). When you finally get them, you can install it in your home directory. The default location is `$HOME/.aws/credentials` on Linux and OS X, or `"%USERPROFILE%\.aws\credentials"` for Windows users. 
32 | 33 | Here is an example of the output when you're done: 34 | 35 | ```shell 36 | $ cat ~/.aws/credentials 37 | [default] 38 | aws_access_key_id = ACHEHS71DG712w7EXAMPLE 39 | aws_secret_access_key = /R8SHF+SHFJaerSKE83awf4ASyrF83sa471DHSEXAMPLE 40 | ``` 41 | 42 | ### Terraform deployment 43 | 44 | The setup variables for Terraform are defined in the file `.deploy/desired_cluster_profile`. You can make a change to the file and it will persist when you do other commands to your cluster in the future. 45 | 46 | For example, you can see the default configuration of your cluster: 47 | 48 | ```shell 49 | $ cat .deploy/desired_cluster_profile 50 | os = "centos_7.4" 51 | state = "none" 52 | # 53 | num_of_masters = "1" 54 | num_of_private_agents = "3" 55 | num_of_public_agents = "1" 56 | # 57 | aws_region = "us-west-2" 58 | aws_bootstrap_instance_type = "m4.large" 59 | aws_master_instance_type = "m4.2xlarge" 60 | aws_agent_instance_type = "m4.2xlarge" 61 | aws_public_agent_instance_type = "m4.2xlarge" 62 | ssh_key_name = "default" 63 | # Inbound Master Access 64 | admin_cidr = "0.0.0.0/0" 65 | ``` 66 | 67 | You can plan the profile with Terraform while referencing: 68 | 69 | ```shell 70 | $ make plan 71 | ``` 72 | 73 | If you are happy with the changes, then you can apply the profile with Terraform while referencing: 74 | 75 | ```shell 76 | $ make launch-infra 77 | ``` 78 | 79 | ## Install DC/OS 80 | 81 | Once the components are created, we can run the Ansible script to install DC/OS on the instances. 82 | 83 | The setup variables for DC/OS are defined in the file `group_vars/all/vars`. Copy the example files, by running: 84 | 85 | ```shell 86 | $ cp group_vars/all.example group_vars/all 87 | ``` 88 | 89 | The now created file `group_vars/all` is for configuring DC/OS and common variables. The variables are explained within the files. 90 | 91 | Optionally you can change the exhibitor backend to `aws_s3`.
So the master discovery is done by using a S3 bucket, this is suggested for production deployments on AWS. For that you need to create an S3 bucket on your own and specify the AWS credentials, the bucket name, and the bucket region: 92 | 93 | ``` 94 | # Optional if dcos_iaas_target := aws 95 | dcos_exhibitor: 'aws_s3' 96 | dcos_exhibitor_explicit_keys: true 97 | dcos_aws_access_key_id: 'YOUR_AWS_ACCESS_KEY_ID' 98 | dcos_aws_secret_access_key: 'YOUR_AWS_SECRET_ACCESS_KEY' 99 | dcos_aws_region: 'YOUR_BUCKET_REGION' 100 | dcos_s3_bucket: 'YOUR_BUCKET_NAME' 101 | ``` 102 | 103 | Ansible also needs to know how to find the instances that got created via Terraform. For that we run a dynamic inventory script called `./inventory.py`. To use it specify the script with the parameter `-i`. In example, check that all instances are reachable via Ansible: 104 | 105 | ```shell 106 | $ ansible all -i inventory.py -m ping 107 | ``` 108 | 109 | Finally, you can install DC/OS by running: 110 | 111 | ```shell 112 | $ ansible-playbook -i inventory.py plays/install.yml 113 | ``` 114 | 115 | ## Access the cluster 116 | 117 | If the installation was successful. You should be able to reach the Master load balancer. You can find the URL of the Master LB with the following command: 118 | 119 | ```shell 120 | $ make ui 121 | ``` 122 | 123 | The terraform script also created a load balancer for the public agents: 124 | 125 | ```shell 126 | $ make public-lb 127 | ``` 128 | 129 | ## Destroy the cluster 130 | 131 | To delete the AWS stack run the command: 132 | 133 | ```shell 134 | $ make destroy 135 | ``` 136 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_marathon.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 
4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ensure_dcos, run_command, _dcos_path 22 | 23 | try: 24 | from __main__ import display 25 | except ImportError: 26 | from ansible.utils.display import Display 27 | display = Display() 28 | 29 | def get_app_state(app_id): 30 | """Get the current state of an app.""" 31 | r = subprocess.check_output(['dcos', 'marathon', 'app', 'list', '--json' ], env=_dcos_path()) 32 | apps = json.loads(r) 33 | 34 | display.vvv('looking for app_id {}'.format(app_id)) 35 | 36 | state = 'absent' 37 | for a in apps: 38 | try: 39 | if app_id in a['id']: 40 | state = 'present' 41 | display.vvv('found app: {}'.format(app_id)) 42 | 43 | except KeyError: 44 | continue 45 | return state 46 | 47 | def app_create(app_id, options): 48 | """Deploy an app via Marathon""" 49 | display.vvv("DC/OS: Marathon create app {}".format(app_id)) 50 | 51 | # create a temporary file for the options json file 52 | with tempfile.NamedTemporaryFile('w+') as f: 53 | json.dump(options, f) 54 | 55 | # force write the file to disk to make sure subcommand can read it 56 | f.flush() 57 | os.fsync(f) 58 | 59 | display.vvv(subprocess.check_output( 60 | ['cat', f.name]).decode()) 61 | 62 | cmd = [ 63 | 'dcos', 64 | 'marathon', 65 | 'app', 66 | 'add', 67 | f.name 68 | ] 69 | run_command(cmd, 'add app', stop_on_error=True) 70 | 71 | 72 | def app_update(app_id, options): 73 | """Update an app via Marathon""" 74 | display.vvv("DC/OS: Marathon update app {}".format(app_id)) 75 | 76 | # create a temporary file for the options json file 77 | with 
tempfile.NamedTemporaryFile('w+') as f: 78 | json.dump(options, f) 79 | 80 | # force write the file to disk to make sure subcommand can read it 81 | f.flush() 82 | os.fsync(f) 83 | 84 | cmd = [ 85 | 'dcos', 86 | 'marathon', 87 | 'app', 88 | 'update', 89 | '--force', 90 | app_id 91 | ] 92 | 93 | from subprocess import Popen, PIPE 94 | 95 | p = Popen(cmd, env=_dcos_path(), stdin=PIPE, stdout=PIPE, stderr=PIPE) 96 | stdout, stderr = p.communicate(json.dumps(options)) 97 | 98 | display.vvv("stdout {}".format(stdout)) 99 | display.vvv("stderr {}".format(stderr)) 100 | 101 | def app_remove(app_id): 102 | """Remove an app via Marathon""" 103 | display.vvv("DC/OS: Marathon remove app {}".format(app_id)) 104 | 105 | cmd = [ 106 | 'dcos', 107 | 'marathon', 108 | 'app', 109 | 'remove', 110 | '/' + app_id, 111 | ] 112 | run_command(cmd, 'remove app', stop_on_error=True) 113 | 114 | class ActionModule(ActionBase): 115 | def run(self, tmp=None, task_vars=None): 116 | 117 | result = super(ActionModule, self).run(tmp, task_vars) 118 | del tmp # tmp no longer has any effect 119 | 120 | if self._play_context.check_mode: 121 | # in --check mode, always skip this module execution 122 | result['skipped'] = True 123 | result['msg'] = 'The dcos task does not support check mode' 124 | return result 125 | 126 | args = self._task.args 127 | state = args.get('state', 'present') 128 | 129 | # ensure app_id has a single leading forward slash 130 | app_id = '/' + args.get('app_id', '').strip('/') 131 | 132 | options = args.get('options') or {} 133 | options['id']= app_id 134 | 135 | ensure_dcos() 136 | 137 | current_state = get_app_state(app_id) 138 | wanted_state = state 139 | 140 | if current_state == wanted_state: 141 | 142 | display.vvv( 143 | "Marathon app {} already in desired state {}".format(app_id, wanted_state)) 144 | 145 | if wanted_state == "present": 146 | app_update(app_id, options) 147 | 148 | result['changed'] = False 149 | else: 150 | display.vvv("Marathon app {} not in desired 
state {}".format(app_id, wanted_state)) 151 | 152 | if wanted_state != 'absent': 153 | app_create(app_id, options) 154 | else: 155 | app_remove(app_id) 156 | 157 | result['changed'] = True 158 | 159 | return result 160 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_connection.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import base64 10 | import json 11 | import subprocess 12 | import time 13 | import os 14 | import sys 15 | 16 | try: 17 | from urllib.parse import urlparse 18 | except ImportError: 19 | from urlparse import urlparse 20 | 21 | from ansible.plugins.action import ActionBase 22 | from ansible.errors import AnsibleActionFail 23 | 24 | # to prevent duplicating code, make sure we can import common stuff 25 | sys.path.append(os.getcwd()) 26 | sys.path.append(os.getcwd() + '/roles/package/') 27 | from action_plugins.common import ensure_dcos, run_command, _dcos_path 28 | 29 | try: 30 | from __main__ import display 31 | except ImportError: 32 | from ansible.utils.display import Display 33 | display = Display() 34 | 35 | DCOS_CONNECT_FLAGS = ['insecure', 'no_check'] 36 | DCOS_AUTH_OPTS = [ 37 | 'username', 38 | 'password', 39 | 'password_env', 40 | 'password_file', 41 | 'provider', 42 | 'private_key', 43 | ] 44 | DCOS_CONNECT_OPTS = DCOS_AUTH_OPTS + ['ca_certs'] 45 | 46 | def check_cluster(name=None, url=None): 47 | """Check whether cluster is already setup. 
48 | 49 | :param url: url of the cluster 50 | :return: boolean whether cluster is already setup 51 | """ 52 | 53 | if url is not None: 54 | fqdn = urlparse(url).netloc 55 | else: 56 | fqdn = None 57 | 58 | attached_cluster = None 59 | wanted_cluster = None 60 | 61 | clusters = subprocess.check_output(['dcos', 'cluster', 'list', '--json'], env=_dcos_path()) 62 | for c in json.loads(clusters): 63 | if fqdn == urlparse(c['url']).netloc: 64 | wanted_cluster = c 65 | elif c['name'] == name: 66 | wanted_cluster = c 67 | if c['attached'] is True: 68 | attached_cluster = c 69 | 70 | display.vvv('wanted:\n{}\nattached:\n{}\n'.format(wanted_cluster, 71 | attached_cluster)) 72 | 73 | if wanted_cluster is None: 74 | return False 75 | elif wanted_cluster == attached_cluster: 76 | return True 77 | else: 78 | subprocess.check_call( 79 | ['dcos', 'cluster', 'attach', wanted_cluster['cluster_id']], env=_dcos_path()) 80 | return True 81 | 82 | 83 | def parse_connect_options(cluster_options=True, **kwargs): 84 | valid_opts = DCOS_CONNECT_OPTS if cluster_options else DCOS_AUTH_OPTS 85 | cli_args = [] 86 | for k, v in kwargs.items(): 87 | cli_k = '--' + k.replace('_', '-') 88 | if cluster_options and k in DCOS_CONNECT_FLAGS and v is True: 89 | cli_args.append(cli_k) 90 | if k in valid_opts: 91 | cli_args.extend([cli_k, v]) 92 | return cli_args 93 | 94 | 95 | def ensure_auth(**connect_args): 96 | valid = False 97 | r = run_command(['dcos', 'config', 'show', 'core.dcos_acs_token']) 98 | 99 | if r.returncode == 0: 100 | parts = r.stdout.read().decode().split('.') 101 | info = json.loads(base64.b64decode(parts[1])) 102 | exp = int(info['exp']) 103 | limit = int(time.time()) + 5 * 60 104 | if exp > limit: 105 | valid = True 106 | 107 | if not valid: 108 | refresh_auth(**connect_args) 109 | 110 | 111 | def refresh_auth(**kwargs): 112 | """Run the authentication command using the DC/OS CLI.""" 113 | cli_args = parse_connect_options(False, **kwargs) 114 | return run_command(['dcos', 'auth', 
'login'] + cli_args, 115 | 'refresh auth token', True) 116 | 117 | 118 | def connect_cluster(**kwargs): 119 | """Connect to a DC/OS cluster by url""" 120 | 121 | changed = False 122 | url = kwargs.get('url') 123 | 124 | if not check_cluster(kwargs.get('name'), url): 125 | if url is None: 126 | raise AnsibleActionFail( 127 | 'Not connected: you need to specify the cluster url') 128 | 129 | display.vvv('DC/OS cluster not setup, setting up') 130 | 131 | cli_args = parse_connect_options(**kwargs) 132 | display.vvv('args: {}'.format(cli_args)) 133 | 134 | subprocess.check_call(['dcos', 'cluster', 'setup', url] + cli_args, env=_dcos_path()) 135 | changed = True 136 | 137 | # ensure_auth(**kwargs) 138 | return changed 139 | 140 | 141 | class ActionModule(ActionBase): 142 | def run(self, tmp=None, task_vars=None): 143 | 144 | result = super(ActionModule, self).run(tmp, task_vars) 145 | del tmp # tmp no longer has any effect 146 | 147 | if self._play_context.check_mode: 148 | # in --check mode, always skip this module execution 149 | result['skipped'] = True 150 | result['msg'] = 'The dcos task does not support check mode' 151 | return result 152 | 153 | args = self._task.args 154 | 155 | ensure_dcos() 156 | 157 | result['changed'] = connect_cluster(**args) 158 | return result -------------------------------------------------------------------------------- /inventory.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | ''' 3 | dynamic inventory script to parse terraform output 4 | ''' 5 | 6 | import json 7 | import subprocess 8 | import argparse 9 | 10 | class TerraformInventory(object): 11 | 12 | def _empty_inventory(self): 13 | return {"_meta": {"hostvars": {}}} 14 | 15 | def parse_cli_args(self): 16 | ''' Command line argument processing ''' 17 | 18 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Terraform Output') 19 | parser.add_argument('--list', 
action='store_true', default=True, help='List instances (default: True)') 20 | parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') 21 | self.args = parser.parse_args() 22 | 23 | def push_hosts(self, my_dict, key, element): 24 | ''' Push hostname entries to a group ''' 25 | 26 | parent_group = my_dict.setdefault(key, {}) 27 | parent_group.update({"hosts": element}) 28 | 29 | def push_var(self, my_dict, key, element): 30 | ''' Push variables to a group ''' 31 | 32 | parent_group = my_dict.setdefault(key, {}) 33 | var_groups = parent_group.setdefault('vars', {}) 34 | var_groups.update(element) 35 | 36 | def push_child(self, my_dict, key, element): 37 | ''' Push a group as a child of another group. ''' 38 | 39 | parent_group = my_dict.setdefault(key, {}) 40 | child_groups = parent_group.setdefault('children', []) 41 | if element not in child_groups: 42 | child_groups.append(element) 43 | 44 | def parse_terraform(self): 45 | ''' Retrieve json output from cmd and parse instances and variables ''' 46 | 47 | cmd_read = subprocess.getoutput("cd .deploy && terraform output -json") 48 | terraform_data = json.loads(cmd_read) 49 | 50 | for entry in terraform_data: 51 | 52 | # Add group bootstraps 53 | if entry == 'bootstrap_public_ips': 54 | self.push_hosts(self.inventory, 'bootstraps', terraform_data['bootstrap_public_ips']['value'].split()) 55 | self.push_child(self.inventory, 'common', 'bootstraps') 56 | 57 | # Add group masters 58 | elif entry == 'master_public_ips': 59 | self.push_hosts(self.inventory, 'masters', terraform_data['master_public_ips']['value']) 60 | self.push_child(self.inventory, 'common', 'masters') 61 | 62 | # Add group agents 63 | elif entry == 'agent_public_ips': 64 | self.push_hosts(self.inventory, 'agents', terraform_data['agent_public_ips']['value']) 65 | self.push_child(self.inventory, 'common', 'agents') 66 | 67 | # Add group public agents 68 | elif entry == 'public_agent_public_ips': 69 | 
self.push_hosts(self.inventory, 'agent_publics', terraform_data['public_agent_public_ips']['value']) 70 | self.push_child(self.inventory, 'common', 'agent_publics') 71 | 72 | # Add variables 73 | elif entry == 'bootstrap_private_ips': 74 | self.push_var(self.inventory, 'common', {"dcos_bootstrap_ip": terraform_data['bootstrap_private_ips']['value']}) 75 | 76 | elif entry == 'master_private_ips': 77 | self.push_var(self.inventory, 'common', {"dcos_master_list": terraform_data['master_private_ips']['value']}) 78 | 79 | elif entry == 'dns_resolvers': 80 | self.push_var(self.inventory, 'common', {"dcos_resolvers": terraform_data['dns_resolvers']['value'] }) 81 | 82 | elif entry == 'dns_search': 83 | self.push_var(self.inventory, 'common', {"dcos_dns_search": terraform_data['dns_search']['value'] }) 84 | 85 | elif entry == 'lb_internal_masters': 86 | self.push_var(self.inventory, 'common', {"dcos_exhibitor_address": terraform_data['lb_internal_masters']['value'] }) 87 | 88 | elif entry == 'lb_external_masters': 89 | self.push_var(self.inventory, 'common', {"dcos_master_address": terraform_data['lb_external_masters']['value'] }) 90 | 91 | elif entry == 'cluster_prefix': 92 | self.push_var(self.inventory, 'common', {"dcos_s3_prefix": terraform_data['cluster_prefix']['value']}) 93 | self.push_var(self.inventory, 'common', {"dcos_exhibitor_azure_prefix": terraform_data['cluster_prefix']['value']}) 94 | 95 | elif entry == 'ip_detect': 96 | self.push_var(self.inventory, 'common', {"dcos_iaas_target": terraform_data['ip_detect']['value']}) 97 | 98 | def json_format_dict(self, data, pretty=False): 99 | ''' Converts a dict to a JSON object and dumps it as a formatted string ''' 100 | 101 | if pretty: 102 | return json.dumps(data, sort_keys=True, indent=2) 103 | else: 104 | return json.dumps(data) 105 | 106 | def __init__(self): 107 | ''' Main execution path ''' 108 | 109 | # Initialize inventory 110 | self.inventory = self._empty_inventory() 111 | 112 | # Read settings and parse 
CLI arguments 113 | self.parse_cli_args() 114 | 115 | # Parse hosts and variables form Terraform output 116 | self.parse_terraform() 117 | 118 | # Data to print 119 | if self.args.host: 120 | data_to_print = self._empty_inventory() 121 | 122 | elif self.args.list: 123 | # Display list of instances for inventory 124 | data_to_print = self.json_format_dict(self.inventory, True) 125 | 126 | print(data_to_print) 127 | 128 | if __name__ == '__main__': 129 | # Run the script 130 | TerraformInventory() 131 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_package.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ensure_dcos, run_command, _dcos_path 22 | 23 | try: 24 | from __main__ import display 25 | except ImportError: 26 | from ansible.utils.display import Display 27 | display = Display() 28 | 29 | def get_current_version(package, app_id): 30 | """Get the current version of an installed package.""" 31 | r = subprocess.check_output(['dcos', 'package', 'list', '--json', '--app-id='+app_id ], env=_dcos_path()) 32 | packages = json.loads(r) 33 | 34 | display.vvv('looking for package {} app_id {}'.format(package, app_id)) 35 | 36 | v = None 37 | for p in packages: 38 | try: 39 | if p['name'] == package and '/' + app_id in p['apps']: 40 | v = p['version'] 41 | except KeyError: 42 | continue 43 | 
def get_current_version(package, app_id):
    """Return the installed version of *package* running under */app_id*.

    Queries ``dcos package list`` and returns the version string, or
    ``None`` when the package is not installed under that app id.
    """
    out = subprocess.check_output(
        ['dcos', 'package', 'list', '--json', '--app-id=' + app_id],
        env=_dcos_path())
    packages = json.loads(out)

    display.vvv('looking for package {} app_id {}'.format(package, app_id))

    version = None
    for p in packages:
        try:
            if p['name'] == package and '/' + app_id in p['apps']:
                version = p['version']
        except KeyError:
            # entries missing name/apps/version are simply not a match
            continue
    display.vvv('{} current version: {}'.format(package, version))
    return version


def get_wanted_version(version, state):
    """Return the version we want installed: ``None`` when state is 'absent'."""
    return None if state == 'absent' else version


def install_package(package, version, options):
    """Install a Universe package on DC/OS with the given options dict."""
    display.vvv("DC/OS: installing package {} version {}".format(
        package, version))

    # create a temporary file for the options json file
    with tempfile.NamedTemporaryFile('w+') as f:
        json.dump(options, f)

        # force write the file to disk to make sure the subcommand can read it
        f.flush()
        os.fsync(f.fileno())

        # log the options we are about to apply (read the file directly
        # instead of shelling out to `cat`)
        f.seek(0)
        display.vvv(f.read())

        cmd = [
            'dcos',
            'package',
            'install',
            package,
            '--yes',
            '--package-version',
            version,
            '--options',
            f.name
        ]
        run_command(cmd, 'install package', stop_on_error=True)


def update_package(package, app_id, version, options, timeout=600):
    """Update a package by removing the app and reinstalling *version*.

    Raises:
        AnsibleActionFail: when the old package is still listed after
            *timeout* seconds (previously this loop could poll forever).
    """
    display.vvv("DC/OS: updating package {} version {}".format(
        package, version))

    app_remove(app_id)
    time.sleep(20)
    deadline = time.time() + timeout
    while get_current_version(package, app_id) is not None:
        if time.time() > deadline:
            raise AnsibleActionFail(
                'timed out waiting for package {} to be removed'.format(
                    package))
        time.sleep(1)

    install_package(package, version, options)


def uninstall_package(package, app_id):
    """Uninstall a Universe package together with its app instance."""
    display.vvv("DC/OS: uninstalling package {}".format(package))

    cmd = [
        'dcos',
        'package',
        'uninstall',
        package,
        '--yes',
        '--app',
        '--app-id',
        '/' + app_id,
    ]
    run_command(cmd, 'uninstall package', stop_on_error=True)


def app_remove(app_id):
    """Remove a Marathon app (used to tear the old version down first)."""
    display.vvv("DC/OS: remove app {}".format(app_id))

    cmd = [
        'dcos',
        'marathon',
        'app',
        'remove',
        '/' + app_id,
    ]
    run_command(cmd, 'remove app', stop_on_error=True)


class ActionModule(ActionBase):
    def run(self, tmp=None, task_vars=None):
        """Ensure a DC/OS Universe package is present/absent per task args."""
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if self._play_context.check_mode:
            # in --check mode, always skip this module execution
            result['skipped'] = True
            result['msg'] = 'The dcos task does not support check mode'
            return result

        args = self._task.args
        package_name = args.get('name', None)
        package_version = args.get('version', None)
        state = args.get('state', 'present')

        if package_name is None:
            # without a name, .strip('/') below raised AttributeError
            raise AnsibleActionFail('name is required for dcos_package')

        if state == 'present' and package_version is None:
            # a missing version made wanted_version None, which the logic
            # below treats like state=absent and triggers an uninstall
            raise AnsibleActionFail(
                'version is required when state is present')

        # ensure app_id has no leading or trailing /
        app_id = args.get('app_id', package_name).strip('/')

        # the service name always follows the app_id
        options = args.get('options') or {}
        options.setdefault('service', {})['name'] = app_id

        ensure_dcos()

        current_version = get_current_version(package_name, app_id)
        wanted_version = get_wanted_version(package_version, state)

        if current_version == wanted_version:
            display.vvv(
                "Package {} already in desired state".format(package_name))

            if state == "present":
                # NOTE(review): this tears down and reinstalls even when
                # the version already matches (presumably to re-apply
                # option changes) while reporting changed=False — confirm
                # this is intentional before altering it
                update_package(package_name, app_id, wanted_version, options)

            result['changed'] = False
        else:
            display.vvv(
                "Package {} not in desired state".format(package_name))
            if wanted_version is not None:
                if current_version is not None:
                    update_package(
                        package_name, app_id, wanted_version, options)
                else:
                    install_package(package_name, wanted_version, options)
            else:
                uninstall_package(package_name, app_id)

            result['changed'] = True

        return result
quickly and reliably run Kubernetes clusters on Mesosphere DC/OS.

## Known limitations

Before proceeding, please check the [current Kubernetes package limitations](https://docs.mesosphere.com/service-docs/kubernetes/1.2.1-1.10.6/limitations/).

## Pre-Requisites

Make sure your cluster fulfils the [Kubernetes package default requirements](https://docs.mesosphere.com/service-docs/kubernetes/1.2.1-1.10.6/install/#prerequisites/).

## Install Kubernetes on DC/OS package

### On-Premises installation

To start the package installation, trigger the play `plays/kubernetes.yml`. The command for that is:

```shell
$ ansible-playbook plays/kubernetes.yml
```

### Cloud Providers installation

To start the package installation, trigger the play `plays/kubernetes.yml`. The command for that is:

```shell
$ ansible-playbook -i inventory.py plays/kubernetes.yml
```

### Verify installation process

The Kubernetes package installation will take place.
You can watch the progress of what is being deployed with:

```shell
$ watch ./dcos kubernetes plan status deploy
```

Below is an example of how it looks when the install ran successfully:

```
deploy (serial strategy) (COMPLETE)
├─ etcd (serial strategy) (COMPLETE)
│  ├─ etcd-0:[peer] (COMPLETE)
│  ├─ etcd-1:[peer] (COMPLETE)
│  └─ etcd-2:[peer] (COMPLETE)
├─ apiserver (dependency strategy) (COMPLETE)
│  ├─ kube-apiserver-0:[instance] (COMPLETE)
│  ├─ kube-apiserver-1:[instance] (COMPLETE)
│  └─ kube-apiserver-2:[instance] (COMPLETE)
├─ mandatory-addons (serial strategy) (COMPLETE)
│  ├─ mandatory-addons-0:[additional-cluster-role-bindings] (COMPLETE)
│  ├─ mandatory-addons-0:[kubelet-tls-bootstrapping] (COMPLETE)
│  ├─ mandatory-addons-0:[kube-dns] (COMPLETE)
│  ├─ mandatory-addons-0:[metrics-server] (COMPLETE)
│  ├─ mandatory-addons-0:[dashboard] (COMPLETE)
│  └─ mandatory-addons-0:[ark] (COMPLETE)
├─ kubernetes-api-proxy (dependency strategy) (COMPLETE)
│  └─ kubernetes-api-proxy-0:[install] (COMPLETE)
├─ controller-manager (dependency strategy) (COMPLETE)
│  ├─ kube-controller-manager-0:[instance] (COMPLETE)
│  ├─ kube-controller-manager-1:[instance] (COMPLETE)
│  └─ kube-controller-manager-2:[instance] (COMPLETE)
├─ scheduler (dependency strategy) (COMPLETE)
│  ├─ kube-scheduler-0:[instance] (COMPLETE)
│  ├─ kube-scheduler-1:[instance] (COMPLETE)
│  └─ kube-scheduler-2:[instance] (COMPLETE)
├─ node (dependency strategy) (COMPLETE)
│  ├─ kube-node-0:[kube-proxy, coredns, kubelet] (COMPLETE)
│  ├─ kube-node-1:[kube-proxy, coredns, kubelet] (COMPLETE)
│  └─ kube-node-2:[kube-proxy, coredns, kubelet] (COMPLETE)
└─ public-node (dependency strategy) (COMPLETE)
   └─ kube-node-public-0:[kube-proxy, coredns, kubelet] (COMPLETE)
```

After that, all Kubernetes tasks
are running and `kubectl` is configured to access the Kubernetes API from outside the DC/OS cluster.

### Accessing the Kubernetes API

Let's test accessing the Kubernetes API and list the Kubernetes cluster nodes:

```shell
$ ./kubectl get nodes
NAME                                          STATUS    ROLES     AGE       VERSION
kube-node-0-kubelet.kubernetes.mesos          Ready               3m        v1.10.5
kube-node-1-kubelet.kubernetes.mesos          Ready               3m        v1.10.5
kube-node-2-kubelet.kubernetes.mesos          Ready               3m        v1.10.5
kube-node-public-0-kubelet.kubernetes.mesos   Ready               1m        v1.10.5
```

## Upgrade Kubernetes on DC/OS package

In order to upgrade the Kubernetes on DC/OS package, you have to set the target package version of Kubernetes on DC/OS in the file `plays/kubernetes.yml`. For example, if you want to upgrade to Kubernetes on DC/OS `1.2.1-1.10.6`, specify the version within the variable `dcos_k8s_package_version`:

```yaml
roles:
  - role: package/kubernetes
    vars:
      dcos_k8s_enabled: true
      dcos_k8s_app_id: 'kubernetes'
      dcos_k8s_package_version: '1.2.1-1.10.6'
```

### On-Premises upgrade

To start the package upgrade, trigger the play `plays/kubernetes.yml`. The command for that is:

```shell
$ ansible-playbook plays/kubernetes.yml
```

### Cloud Providers upgrade

To start the package upgrade, trigger the play `plays/kubernetes.yml`. The command for that is:

```shell
$ ansible-playbook -i inventory.py plays/kubernetes.yml
```

For more details, please check the official [Kubernetes package upgrade doc](https://docs.mesosphere.com/services/kubernetes/1.2.1-1.10.6/upgrade/#updating-the-package-version).
123 | 124 | ## Uninstall Kubernetes on DC/OS package 125 | 126 | In order to uninstall Kubernetes on DC/OS, you have to disable the package by changing the variable `dcos_k8s_enabled` to `false` inside of the file `plays/kubernetes.yml`. For example: 127 | 128 | ```yaml 129 | roles: 130 | - role: package/kubernetes 131 | vars: 132 | dcos_k8s_enabled: false 133 | dcos_k8s_app_id: 'kubernetes' 134 | dcos_k8s_package_version: '1.2.1-1.10.6' 135 | ``` 136 | 137 | ### On-Premises uninstallation 138 | 139 | To start the package uninstallation trigger the play `plays/kubernetes.yml`. The command for that is: 140 | 141 | ```shell 142 | $ ansible-playbook plays/kubernetes.yml 143 | ``` 144 | 145 | ### Cloud Providers uninstallation 146 | 147 | To start the package uninstallation trigger the play `plays/kubernetes.yml`. The command for that is: 148 | 149 | ```shell 150 | $ ansible-playbook -i inventory.py plays/kubernetes.yml 151 | ``` 152 | 153 | ## Documentation 154 | 155 | For more details, please check the official [Kubernetes package docs](https://docs.mesosphere.com/service-docs/kubernetes/1.2.1-1.10.6). 156 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_edgelb.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 
4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ensure_dcos, run_command, _dcos_path 22 | 23 | try: 24 | from __main__ import display 25 | except ImportError: 26 | from ansible.utils.display import Display 27 | display = Display() 28 | 29 | def ensure_dcos_edgelb(instance_name): 30 | """Check whether the dcos[cli] edgelb extension is installed.""" 31 | 32 | try: 33 | subprocess.check_output([ 34 | 'dcos', 35 | 'edgelb', 36 | '--name=' + instance_name, 37 | 'ping' 38 | ], env=_dcos_path()).decode() 39 | except: 40 | display.vvv("dcos edgelb: not installed") 41 | install_dcos_edgelb_cli() 42 | subprocess.check_output([ 43 | 'dcos', 44 | 'edgelb', 45 | '--name=' + instance_name, 46 | 'ping' 47 | ], env=_dcos_path()).decode() 48 | 49 | display.vvv("dcos edgelb: all prerequisites seem to be in order") 50 | 51 | def install_dcos_edgelb_cli(): 52 | """Install DC/OS edgelb CLI""" 53 | display.vvv("dcos edgelb: installing cli") 54 | 55 | cmd = [ 56 | 'dcos', 57 | 'package', 58 | 'install', 59 | 'edgelb', 60 | '--cli', 61 | '--yes' 62 | ] 63 | display.vvv(subprocess.check_output(cmd, env=_dcos_path()).decode()) 64 | 65 | def get_pool_state(pool_id, instance_name): 66 | """Get the current state of a pool.""" 67 | r = subprocess.check_output([ 68 | 'dcos', 69 | 'edgelb', 70 | 'list', 71 | '--name=' + instance_name, 72 | '--json' 73 | ], env=_dcos_path()) 74 | pools = json.loads(r) 75 | 76 | display.vvv('looking for pool_id {}'.format(pool_id)) 77 | 78 | state = 'absent' 79 | for p in pools: 80 | try: 81 | if pool_id in p['name']: 82 | state = 'present' 83 
| display.vvv('found pool: {}'.format(pool_id)) 84 | 85 | except KeyError: 86 | continue 87 | return state 88 | 89 | def pool_create(pool_id, instance_name, options): 90 | """Create a pool""" 91 | display.vvv("DC/OS: edgelb create pool {}".format(pool_id)) 92 | 93 | # create a temporary file for the options json file 94 | with tempfile.NamedTemporaryFile('w+') as f: 95 | json.dump(options, f) 96 | 97 | # force write the file to disk to make sure subcommand can read it 98 | f.flush() 99 | os.fsync(f) 100 | 101 | display.vvv(subprocess.check_output( 102 | ['cat', f.name]).decode()) 103 | 104 | cmd = [ 105 | 'dcos', 106 | 'edgelb', 107 | 'create', 108 | '--name=' + instance_name, 109 | f.name 110 | ] 111 | run_command(cmd, 'update pool', stop_on_error=True) 112 | 113 | 114 | def pool_update(pool_id, instance_name, options): 115 | """Update an pool""" 116 | display.vvv("DC/OS: Edgelb update pool {}".format(pool_id)) 117 | 118 | # create a temporary file for the options json file 119 | with tempfile.NamedTemporaryFile('w+') as f: 120 | json.dump(options, f) 121 | 122 | # force write the file to disk to make sure subcommand can read it 123 | f.flush() 124 | os.fsync(f) 125 | 126 | display.vvv(subprocess.check_output( 127 | ['cat', f.name]).decode()) 128 | 129 | cmd = [ 130 | 'dcos', 131 | 'edgelb', 132 | 'update', 133 | '--name=' + instance_name, 134 | f.name 135 | ] 136 | run_command(cmd, 'update pool', stop_on_error=True) 137 | 138 | def pool_delete(pool_id, instance_name): 139 | """Delete a pool""" 140 | display.vvv("DC/OS: Edge-LB delete pool {}".format(pool_id)) 141 | 142 | cmd = [ 143 | 'dcos', 144 | 'edgelb', 145 | 'delete', 146 | '--name=' + instance_name, 147 | pool_id, 148 | ] 149 | run_command(cmd, 'delete pool', stop_on_error=True) 150 | 151 | class ActionModule(ActionBase): 152 | def run(self, tmp=None, task_vars=None): 153 | 154 | result = super(ActionModule, self).run(tmp, task_vars) 155 | del tmp # tmp no longer has any effect 156 | 157 | if 
self._play_context.check_mode: 158 | # in --check mode, always skip this module execution 159 | result['skipped'] = True 160 | result['msg'] = 'The dcos task does not support check mode' 161 | return result 162 | 163 | args = self._task.args 164 | state = args.get('state', 'present') 165 | 166 | instance_name = args.get('instance_name', 'edgelb') 167 | # ensure pool_id has no leading forward slash 168 | pool_id = args.get('pool_id', '').strip('/') 169 | 170 | options = args.get('options') or {} 171 | options['name']= pool_id 172 | 173 | ensure_dcos() 174 | ensure_dcos_edgelb(instance_name) 175 | 176 | current_state = get_pool_state(pool_id, instance_name) 177 | wanted_state = state 178 | 179 | if current_state == wanted_state: 180 | 181 | display.vvv( 182 | "edgelb pool {} already in desired state {}".format(pool_id, wanted_state)) 183 | 184 | if wanted_state == "present": 185 | pool_update(pool_id, instance_name, options) 186 | 187 | result['changed'] = False 188 | else: 189 | display.vvv("edgelb pool {} not in desired state {}".format(pool_id, wanted_state)) 190 | 191 | if wanted_state != 'absent': 192 | pool_create(pool_id, instance_name, options) 193 | else: 194 | pool_delete(pool_id, instance_name) 195 | 196 | result['changed'] = True 197 | 198 | return result 199 | -------------------------------------------------------------------------------- /roles/package/action_plugins/dcos_iam_serviceaccount.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action plugin to configure a DC/OS cluster. 3 | Uses the Ansible host to connect directly to DC/OS. 
4 | """ 5 | 6 | from __future__ import (absolute_import, division, print_function) 7 | __metaclass__ = type 8 | 9 | import json 10 | import subprocess 11 | import tempfile 12 | import time 13 | import os 14 | import sys 15 | 16 | from ansible.plugins.action import ActionBase 17 | from ansible.errors import AnsibleActionFail 18 | 19 | # to prevent duplicating code, make sure we can import common stuff 20 | sys.path.append(os.getcwd()) 21 | from action_plugins.common import ( 22 | ensure_dcos, 23 | ensure_dcos_security, 24 | run_command, 25 | _dcos_path 26 | ) 27 | 28 | from action_plugins.dcos_secret import ( 29 | get_secret_value, 30 | secret_delete 31 | ) 32 | try: 33 | from __main__ import display 34 | except ImportError: 35 | from ansible.utils.display import Display 36 | display = Display() 37 | 38 | def get_service_account_state(sid): 39 | """Get the current state of a service_account.""" 40 | 41 | r = subprocess.check_output([ 42 | 'dcos', 43 | 'security', 44 | 'org', 45 | 'service-accounts', 46 | 'show', 47 | '--json' 48 | ], 49 | env=_dcos_path() 50 | ) 51 | service_accounts = json.loads(r) 52 | 53 | display.vvv('looking for sid {}'.format(sid)) 54 | 55 | state = 'absent' 56 | for g in service_accounts: 57 | try: 58 | if sid in g: 59 | state = 'present' 60 | display.vvv('found sid: {}'.format(sid)) 61 | 62 | except KeyError: 63 | continue 64 | return state 65 | 66 | def service_account_create(sid, secret_path, store, description): 67 | """Create a service_account""" 68 | display.vvv("DC/OS: IAM create service_account {}".format(sid)) 69 | 70 | if get_secret_value(secret_path, store) is not None: 71 | secret_delete(secret_path, store) 72 | 73 | with tempfile.NamedTemporaryFile('w+') as f_private: 74 | with tempfile.NamedTemporaryFile('w+') as f_public: 75 | 76 | cmd = [ 77 | 'dcos', 78 | 'security', 79 | 'org', 80 | 'service-accounts', 81 | 'keypair', 82 | f_private.name, f_public.name 83 | ] 84 | run_command(cmd, 'create kepypairs', stop_on_error=True) 85 | 
86 | display.vvv(subprocess.check_output( 87 | ['cat', f_private.name]).decode()) 88 | display.vvv(subprocess.check_output( 89 | ['cat', f_public.name]).decode()) 90 | 91 | cmd = [ 92 | 'dcos', 93 | 'security', 94 | 'org', 95 | 'service-accounts', 96 | 'create', 97 | sid, 98 | '--public-key', 99 | f_public.name, 100 | '--description', 101 | description 102 | ] 103 | run_command(cmd, 'create service account', stop_on_error=True) 104 | 105 | cmd = [ 106 | 'dcos', 107 | 'security', 108 | 'secrets', 109 | 'create-sa-secret', 110 | '--store-id', 111 | store, 112 | '--strict', 113 | f_private.name, 114 | sid, 115 | secret_path 116 | ] 117 | run_command(cmd, 'create service secret', stop_on_error=True) 118 | 119 | def service_account_update(sid, groups): 120 | """Update service_account groups""" 121 | display.vvv("DC/OS: IAM update service_account {}".format(sid)) 122 | 123 | for g in groups: 124 | display.vvv("Assigning service_account {} to group {}".format( 125 | sid,g)) 126 | 127 | cmd = [ 128 | 'dcos', 129 | 'security', 130 | 'org', 131 | 'groups', 132 | 'add_user', 133 | g, 134 | sid 135 | ] 136 | run_command(cmd, 'update service_account', stop_on_error=False) 137 | 138 | def service_account_delete(sid): 139 | """Delete a service_account""" 140 | display.vvv("DC/OS: IAM delete service_account {}".format(sid)) 141 | 142 | cmd = [ 143 | 'dcos', 144 | 'security', 145 | 'org', 146 | 'service-accounts', 147 | 'delete', 148 | sid, 149 | ] 150 | run_command(cmd, 'delete service_account', stop_on_error=True) 151 | 152 | class ActionModule(ActionBase): 153 | def run(self, tmp=None, task_vars=None): 154 | 155 | result = super(ActionModule, self).run(tmp, task_vars) 156 | del tmp # tmp no longer has any effect 157 | 158 | if self._play_context.check_mode: 159 | # in --check mode, always skip this module execution 160 | result['skipped'] = True 161 | result['msg'] = 'The dcos task does not support check mode' 162 | return result 163 | 164 | args = self._task.args 165 | sid = 
args.get('sid') 166 | description = args.get('description', 'Created by Ansible') 167 | secret_path = args.get('secret_path') 168 | store = args.get('store', 'default') 169 | groups = args.get('groups', []) 170 | wanted_state = args.get('state', 'present') 171 | 172 | if sid is None: 173 | raise AnsibleActionFail('sid cannot be empty for dcos_iam_service_account') 174 | 175 | if secret_path is None: 176 | raise AnsibleActionFail('secret_path cannot be empty for dcos_iam_service_account') 177 | 178 | ensure_dcos() 179 | ensure_dcos_security() 180 | 181 | current_state = get_service_account_state(sid) 182 | 183 | if current_state == wanted_state: 184 | 185 | display.vvv( 186 | "DC/OS IAM service_account {} already in desired state {}".format(sid, wanted_state)) 187 | 188 | result['changed'] = False 189 | 190 | if wanted_state == "present": 191 | 192 | if get_secret_value(secret_path, store) is None: 193 | service_account_delete(sid) 194 | service_account_create(sid, secret_path, store, description) 195 | result['changed'] = True 196 | 197 | service_account_update(sid, groups) 198 | 199 | else: 200 | display.vvv("DC/OS: IAM service_account {} not in desired state {}".format(sid, wanted_state)) 201 | 202 | if wanted_state != 'absent': 203 | service_account_create(sid, secret_path, store, description) 204 | service_account_update(sid, groups) 205 | 206 | else: 207 | service_account_delete(sid) 208 | 209 | result['changed'] = True 210 | 211 | return result 212 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------