├── .gitignore ├── LICENSE ├── README.md ├── cluster.yml ├── group_vars └── all ├── inventory.yml.example ├── lagacy ├── clean.sh ├── clr.sh ├── generate.sh ├── upgrade-eru-agent.sh ├── upgrade-eru-core.sh ├── upgrade-etcd.sh └── upgrade.sh ├── quickstart.sh └── roles ├── calico ├── meta │ └── main.yml ├── tasks │ ├── calico_binary.yml │ ├── calico_config.yml │ ├── calico_kernel.yml │ ├── calico_launch.yml │ ├── calico_resource.yml │ └── main.yml └── templates │ ├── calicoctl.cfg.j2 │ ├── ippool.yml.j2 │ └── profile.yml.j2 ├── core ├── meta │ └── main.yaml ├── tasks │ ├── core_cli.yml │ ├── core_config.yml │ ├── core_launch.yml │ ├── core_resource.yml │ └── main.yml └── templates │ └── core.yaml.j2 ├── docker ├── meta │ └── main.yml ├── tasks │ ├── docker_config.yml │ ├── docker_install.yml │ ├── docker_launch.yml │ ├── docker_ubuntu_install.yml │ └── main.yml └── templates │ ├── daemon.json.j2 │ └── docker.service.j2 ├── essential ├── tasks │ ├── disable-iptables.yml │ ├── main.yml │ └── set_vars.yml └── vars │ ├── pkg-CentOS.yml │ └── pkg-Ubuntu.yml ├── etcd ├── meta │ └── main.yml ├── tasks │ ├── etcd_binary.yml │ ├── etcd_config.yml │ ├── etcd_launch.yml │ └── main.yml ├── templates │ ├── etcd.conf.j2 │ └── etcd.service.j2 └── vars │ └── main.yml ├── node-docker ├── meta │ └── main.yml ├── tasks │ ├── main.yml │ ├── node_docker_cnm.yml │ ├── node_docker_kernel.yml │ └── node_docker_register.yml ├── templates │ ├── barrel.conf.j2 │ ├── barrel.service.j2 │ └── eru-agent.yaml.j2 └── vars │ └── main.yml └── node-yavirt ├── meta └── main.yml ├── tasks ├── main.yml ├── node_yavirt_binary.yml ├── node_yavirt_config.yml ├── node_yavirt_deps.yml ├── node_yavirt_launch.yml └── node_yavirt_register.yml ├── templates ├── yavirtd.service.j2 └── yavirtd.toml.j2 └── vars └── main.yml /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.swp 3 | inventory.yml 4 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2010-2017 Google, Inc. http://angularjs.org 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | QuickStart 2 | =========== 3 | 4 | Launch a Eru core and agent, run lambda script on it. 5 | 6 | ### Requirements 7 | 8 | * Ubuntu>=1604 9 | * ansible>=2.9.10, jmespath>=0.10.0 10 | * sshpass if ansible runs on macOS 11 | 12 | ### Run Standalone Node 13 | 14 | prepare your inventory and 15 | 16 | ``` 17 | ansible-playbook -i inventory.yml cluster.yml 18 | ``` 19 | 20 | ### Usage 21 | 22 | Let's say we want to run 3 redis server on 3 nodes. 
23 | 24 | Step 1: compose yaml spec for deploy 25 | 26 | ``` 27 | cat > /tmp/spec.yaml < $i 9 | done 10 | -------------------------------------------------------------------------------- /lagacy/upgrade-eru-agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | # root 4 | if [[ `whoami` != "root" ]];then 5 | echo "root permission required" 6 | exit -1 7 | fi 8 | 9 | . env.sh 10 | 11 | docker ps -a | grep eru_agent_ | awk '{print $1}' | xargs -l -I{} docker rm -f {} || echo 12 | 13 | ./run-eru-agent.sh 14 | -------------------------------------------------------------------------------- /lagacy/upgrade-eru-core.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | # root 4 | if [[ `whoami` != "root" ]];then 5 | echo "root permission required" 6 | exit -1 7 | fi 8 | 9 | . env.sh 10 | 11 | docker rm -f ${ERU_CORE_NAME} || echo 12 | 13 | ./run-eru-core.sh 14 | -------------------------------------------------------------------------------- /lagacy/upgrade-etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | # root 4 | if [[ `whoami` != "root" ]];then 5 | echo "root permission required" 6 | exit -1 7 | fi 8 | 9 | . 
env.sh 10 | 11 | rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz 12 | rm -rf /tmp/etcd-download && mkdir -p /tmp/etcd-download 13 | 14 | curl -L ${ETCD_DOWNLOAD_URL}/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz 15 | tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz -C /tmp/etcd-download --strip-components=1 16 | rm -f /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz 17 | 18 | mv -f /tmp/etcd-download/etcd /usr/bin/etcd 19 | mv -f /tmp/etcd-download/etcdctl /usr/bin/etcdctl 20 | 21 | systemctl restart etcd 22 | -------------------------------------------------------------------------------- /lagacy/upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | ./upgrade-etcd.sh 4 | 5 | ./upgrade-eru-core.sh 6 | 7 | ./upgrade-eru-agent.sh 8 | -------------------------------------------------------------------------------- /quickstart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | ans_dir=/tmp/quickstart 4 | rm -fr ${ans_dir} 5 | git clone https://github.com/projecteru2/quickstart.git ${ans_dir} 6 | cd ${ans_dir} 7 | 8 | hn=$(ip a show eth0 | grep inet | grep -v inet6 | awk '{print $2}' | awk -F/ '{print $1}') 9 | 10 | cat <inventory.yml 11 | all: 12 | children: 13 | etcd: 14 | hosts: 15 | ${hn}: 16 | etcd_name: etcd0 17 | vars: 18 | etcd_version: v3.3.4 19 | 20 | core: 21 | hosts: 22 | ${hn}: 23 | 24 | node_docker: 25 | hosts: 26 | ${hn}: 27 | node_docker_name: docker0 28 | node_calico_name: calico0 29 | 30 | calico: 31 | children: 32 | core: 33 | node_docker: 34 | vars: 35 | calico_version: v3.4 36 | calico_ippool_name: testpool 37 | calico_ippool_cidr: 10.10.0.0/16 38 | EOF 39 | 40 | rm -fr /var/lib/dpkg/lock-frontend /var/lib/dpkg/lock 41 | dpkg --configure -a 42 | 43 | apt update -y 44 | 45 | ansible-playbook --become -i inventory.yml cluster.yml 46 | 47 | source /etc/profile 48 | 
-------------------------------------------------------------------------------- /roles/calico/meta/main.yml: -------------------------------------------------------------------------------- 1 | allow_duplicates: no 2 | dependencies: 3 | - role: docker 4 | -------------------------------------------------------------------------------- /roles/calico/tasks/calico_binary.yml: -------------------------------------------------------------------------------- 1 | - name: download calicoctl 2 | uri: 3 | url: https://github.com/projectcalico/calicoctl/releases/download/{{ calico_version }}.0/calicoctl 4 | dest: /usr/bin/calicoctl 5 | follow_redirects: yes 6 | status_code: 7 | - 302 8 | - 304 9 | - 200 10 | 11 | - name: chmod +x calicoctl 12 | file: 13 | path: /usr/bin/calicoctl 14 | mode: '0755' 15 | -------------------------------------------------------------------------------- /roles/calico/tasks/calico_config.yml: -------------------------------------------------------------------------------- 1 | - name: mkdir calicoctl config dir 2 | file: 3 | path: /etc/calico 4 | state: directory 5 | 6 | - name: render calicoctl config 7 | template: 8 | src: calicoctl.cfg.j2 9 | dest: /etc/calico/calicoctl.cfg 10 | -------------------------------------------------------------------------------- /roles/calico/tasks/calico_kernel.yml: -------------------------------------------------------------------------------- 1 | - name: set ip_forward 2 | sysctl: 3 | name: net.ipv4.ip_forward 4 | value: '1' 5 | sysctl_set: yes 6 | state: present 7 | reload: yes 8 | 9 | - name: set nf_conntrack_max 10 | sysctl: 11 | name: net.netfilter.nf_conntrack_max 12 | value: '1000000' 13 | sysctl_set: yes 14 | state: present 15 | reload: yes 16 | -------------------------------------------------------------------------------- /roles/calico/tasks/calico_launch.yml: -------------------------------------------------------------------------------- 1 | - name: inspect calico 2 | shell: docker inspect 
calico-node 3 | register: inspect_calico 4 | ignore_errors: yes 5 | 6 | - name: run calico node 7 | command: calicoctl node run --name={{ node_calico_name }} --node-image=calico/node:release-{{ calico_version }} --disable-docker-networking 8 | when: inspect_calico.rc != 0 9 | -------------------------------------------------------------------------------- /roles/calico/tasks/calico_resource.yml: -------------------------------------------------------------------------------- 1 | - name: render calico resource yaml 2 | template: 3 | src: "{{ item }}.yml.j2" 4 | dest: /tmp/{{ item }}.yml 5 | loop: 6 | - ippool 7 | - profile 8 | 9 | - name: create calico ippool 10 | shell: calicoctl create -f /tmp/ippool.yml 11 | register: res 12 | failed_when: res.rc != 0 and 'resource already exists' not in res.stdout 13 | 14 | - name: create calico profile 15 | shell: calicoctl create -f /tmp/profile.yml 16 | register: res 17 | failed_when: res.rc != 0 and 'resource already exists' not in res.stdout 18 | -------------------------------------------------------------------------------- /roles/calico/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: calico_kernel.yml 2 | tags: 3 | - calico_kernel 4 | 5 | - include: calico_binary.yml 6 | tags: 7 | - calico_binary 8 | 9 | - include: calico_config.yml 10 | tags: 11 | - calico_config 12 | 13 | - include: calico_launch.yml 14 | tags: 15 | - calico_launch 16 | 17 | - include: calico_resource.yml 18 | tags: 19 | - calico_resource 20 | -------------------------------------------------------------------------------- /roles/calico/templates/calicoctl.cfg.j2: -------------------------------------------------------------------------------- 1 | apiVersion: projectcalico.org/v3 2 | 3 | kind: CalicoAPIConfig 4 | metadata: 5 | spec: 6 | datastoreType: "etcdv3" 7 | etcdEndpoints: {{ groups['etcd'] | map('regex_replace', '^', 'http://') | map('regex_replace', '$', ':2379') | join(',') }} 8 | 
-------------------------------------------------------------------------------- /roles/calico/templates/ippool.yml.j2: -------------------------------------------------------------------------------- 1 | - apiVersion: projectcalico.org/v3 2 | kind: IPPool 3 | metadata: 4 | name: {{ calico_ippool_name }} 5 | spec: 6 | natOutgoing: true 7 | ipipMode: CrossSubnet 8 | cidr: {{ calico_ippool_cidr }} 9 | -------------------------------------------------------------------------------- /roles/calico/templates/profile.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: projectcalico.org/v3 2 | kind: Profile 3 | metadata: 4 | name: {{ calico_ippool_name }} 5 | spec: 6 | egress: 7 | - action: Allow 8 | destination: {} 9 | source: {} 10 | ingress: 11 | - action: Allow 12 | destination: {} 13 | source: {} 14 | -------------------------------------------------------------------------------- /roles/core/meta/main.yaml: -------------------------------------------------------------------------------- 1 | allow_duplicates: no 2 | dependencies: 3 | - role: docker 4 | - role: calico 5 | -------------------------------------------------------------------------------- /roles/core/tasks/core_cli.yml: -------------------------------------------------------------------------------- 1 | - name: add golang repo 2 | apt_repository: 3 | repo: ppa:longsleep/golang-backports 4 | state: present 5 | 6 | - name: add go and cli into $PATH 7 | lineinfile: 8 | path: /etc/profile 9 | line: export PATH=$PATH:/usr/local/go/bin:/root/go/bin 10 | 11 | - name: install golang 12 | unarchive: 13 | src: https://dl.google.com/go/go1.17.linux-amd64.tar.gz 14 | dest : /usr/local 15 | remote_src: yes 16 | 17 | - name: create temporary cli container 18 | docker_container: 19 | name: temp_data_container 20 | image: projecteru2/cli 21 | state: present 22 | volumes: 23 | - your_data_volume:/data 24 | 25 | - name: install cli 26 | command: docker cp 
temp_data_container:/usr/bin/eru-cli /usr/local/bin 27 | 28 | - name: install cli lib 29 | command: docker cp temp_data_container:/lib/ld-musl-x86_64.so.1 /lib 30 | 31 | - name: remove temporary cli container 32 | docker_container: 33 | name: temp_data_container 34 | state: absent 35 | -------------------------------------------------------------------------------- /roles/core/tasks/core_config.yml: -------------------------------------------------------------------------------- 1 | - name: mkdir core config dir 2 | file: 3 | path: /etc/eru/ 4 | state: directory 5 | 6 | - name: render core config 7 | template: 8 | src: core.yaml.j2 9 | dest: /etc/eru/core.yaml 10 | -------------------------------------------------------------------------------- /roles/core/tasks/core_launch.yml: -------------------------------------------------------------------------------- 1 | - name: remove current if update 2 | shell: docker rm -f eru-core 3 | ignore_errors: yes 4 | when: upgrade 5 | 6 | - name: run core container 7 | shell: docker run -d --name eru-core --net host --restart always -v /etc/eru:/etc/eru projecteru2/core /usr/bin/eru-core 8 | register: res 9 | failed_when: res.rc != 0 and 'Conflict' not in res.stderr 10 | -------------------------------------------------------------------------------- /roles/core/tasks/core_resource.yml: -------------------------------------------------------------------------------- 1 | - name: create pod 2 | shell: eru-cli pod add eru 3 | -------------------------------------------------------------------------------- /roles/core/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: core_config.yml 2 | tags: 3 | - core_config 4 | 5 | - include: core_launch.yml 6 | tags: 7 | - core_launch 8 | 9 | - include: core_cli.yml 10 | tags: 11 | - core_cli 12 | 13 | - include: core_resource.yml 14 | tags: 15 | - core_resource 16 | 
-------------------------------------------------------------------------------- /roles/core/templates/core.yaml.j2: -------------------------------------------------------------------------------- 1 | log_level: DEBUG 2 | bind: ":5001" 3 | statsd: "127.0.0.1:8125" 4 | global_timeout: 300s 5 | lock_timeout: 30s 6 | 7 | etcd: 8 | machines: 9 | {% for host in groups['etcd'] %} 10 | - http://{{ host }}:2379 11 | {% endfor %} 12 | prefix: "/eru-core" 13 | lock_prefix: "core/_lock" 14 | 15 | docker: 16 | log: 17 | type: "json-file" 18 | config: 19 | "max-size": "10m" 20 | network_mode: "bridge" 21 | cert_path: "" 22 | hub: "hub.docker.com" 23 | namespace: "projecteru2" 24 | build_pod: "eru" 25 | local_dns: true 26 | 27 | scheduler: 28 | maxshare: -1 29 | sharebase: 100 30 | -------------------------------------------------------------------------------- /roles/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | allow_duplicates: no 2 | dependencies: 3 | - role: essential 4 | -------------------------------------------------------------------------------- /roles/docker/tasks/docker_config.yml: -------------------------------------------------------------------------------- 1 | - name: docker daemon.json 2 | template: 3 | src: daemon.json.j2 4 | dest: /etc/docker/daemon.json 5 | 6 | - name: docker service 7 | template: 8 | src: docker.service.j2 9 | dest: /lib/systemd/system/docker.service 10 | -------------------------------------------------------------------------------- /roles/docker/tasks/docker_install.yml: -------------------------------------------------------------------------------- 1 | - name: clean existing config 2 | file: 3 | path: /etc/docker/daemon.json 4 | state: absent 5 | 6 | - include: docker_ubuntu_install.yml 7 | when: ansible_distribution == "Ubuntu" 8 | -------------------------------------------------------------------------------- /roles/docker/tasks/docker_launch.yml: 
-------------------------------------------------------------------------------- 1 | - name: launch docker 2 | systemd: 3 | name: docker 4 | daemon_reload: yes 5 | state: restarted 6 | enabled: yes 7 | masked: no 8 | -------------------------------------------------------------------------------- /roles/docker/tasks/docker_ubuntu_install.yml: -------------------------------------------------------------------------------- 1 | - name: get lsb_release 2 | shell: lsb_release -cs 3 | register: lsb_release 4 | 5 | - name: add docker GPG key 6 | apt_key: 7 | url: https://download.docker.com/linux/ubuntu/gpg 8 | state: present 9 | 10 | - name: setup docker repo 11 | apt_repository: 12 | repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ lsb_release.stdout }} stable 13 | state: present 14 | 15 | - name: install docker suit 16 | apt: 17 | name: "{{ item }}" 18 | state: latest 19 | update_cache: yes 20 | allow_unauthenticated: yes 21 | loop: 22 | - docker-ce 23 | - docker-ce-cli 24 | - containerd.io 25 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: docker_install.yml 2 | tags: 3 | - docker_install 4 | 5 | - include: docker_config.yml 6 | tags: 7 | - docker_config 8 | 9 | - include: docker_launch.yml 10 | tags: 11 | - docker_launch 12 | -------------------------------------------------------------------------------- /roles/docker/templates/daemon.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "hosts": ["unix:///var/run/docker.sock", "tcp://0.0.0.0:2376"], 3 | "cluster-store": "etcd://{% for host in groups['etcd'] %}{{ host }}:2379{% if not loop.last %},{% endif %}{% endfor %}" 4 | } 5 | -------------------------------------------------------------------------------- /roles/docker/templates/docker.service.j2: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Application Container Engine 3 | Documentation=https://docs.docker.com 4 | BindsTo=containerd.service 5 | After=network-online.target firewalld.service containerd.service 6 | Wants=network-online.target 7 | Requires=docker.socket 8 | 9 | [Service] 10 | Type=notify 11 | # the default is not to use systemd for cgroups because the delegate issues still 12 | # exists and systemd currently does not support the cgroup feature set required 13 | # for containers run by docker 14 | ExecStart=/usr/bin/dockerd --containerd=/run/containerd/containerd.sock 15 | ExecReload=/bin/kill -s HUP $MAINPID 16 | TimeoutSec=0 17 | RestartSec=2 18 | Restart=always 19 | 20 | # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. 21 | # Both the old, and new location are accepted by systemd 229 and up, so using the old location 22 | # to make them work for either version of systemd. 23 | StartLimitBurst=3 24 | 25 | # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. 26 | # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make 27 | # this option work for either version of systemd. 28 | StartLimitInterval=60s 29 | 30 | # Having non-zero Limit*s causes performance problems due to accounting overhead 31 | # in the kernel. We recommend using cgroups to do container-local accounting. 32 | LimitNOFILE=infinity 33 | LimitNPROC=infinity 34 | LimitCORE=infinity 35 | 36 | # Comment TasksMax if your systemd version does not support it. 37 | # Only systemd 226 and above support this option. 
38 | TasksMax=infinity 39 | 40 | # set delegate yes so that systemd does not reset the cgroups of docker containers 41 | Delegate=yes 42 | 43 | # kill only the docker process, not all processes in the cgroup 44 | KillMode=process 45 | 46 | [Install] 47 | WantedBy=multi-user.target 48 | -------------------------------------------------------------------------------- /roles/essential/tasks/disable-iptables.yml: -------------------------------------------------------------------------------- 1 | - name: disable iptables 2 | systemd: 3 | name: iptables 4 | state: stopped 5 | -------------------------------------------------------------------------------- /roles/essential/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: load packages list 3 | include_vars: 4 | file: "../vars/pkg-{{ ansible_distribution }}.yml" 5 | 6 | - name: install packages 7 | package: 8 | name: "{{ item }}" 9 | state: latest 10 | loop: "{{ packages|flatten(levels=1) }}" 11 | 12 | - include: set_vars.yml 13 | 14 | - include: disable-iptables.yml 15 | -------------------------------------------------------------------------------- /roles/essential/tasks/set_vars.yml: -------------------------------------------------------------------------------- 1 | - name: set etcd_endpoints 2 | set_fact: 3 | etcd_endpoints: "{{ groups['etcd'] | map('regex_replace', '^', 'http://') | map('regex_replace', '$', ':2379') | join(',') }}" 4 | core_host: "{{ groups['core'] | first }}" 5 | -------------------------------------------------------------------------------- /roles/essential/vars/pkg-CentOS.yml: -------------------------------------------------------------------------------- 1 | packages: 2 | - epel-release 3 | - yum-utils 4 | - device-mapper-persistent-data 5 | - lvm2 6 | - openssl 7 | - musl-libc-static 8 | -------------------------------------------------------------------------------- /roles/essential/vars/pkg-Ubuntu.yml: 
-------------------------------------------------------------------------------- 1 | packages: 2 | - lvm2 3 | - openssl 4 | -------------------------------------------------------------------------------- /roles/etcd/meta/main.yml: -------------------------------------------------------------------------------- 1 | allow_duplicates: no 2 | dependencies: 3 | - role: essential 4 | -------------------------------------------------------------------------------- /roles/etcd/tasks/etcd_binary.yml: -------------------------------------------------------------------------------- 1 | - name: download etcd release 2 | uri: 3 | url: https://storage.googleapis.com/etcd/{{ etcd_version }}/{{ etcd_basename }}.tar.gz 4 | dest: /tmp/ 5 | status_code: 6 | - 200 7 | - 304 8 | 9 | - name: extract etcd release 10 | unarchive: 11 | src: /tmp/{{ etcd_basename }}.tar.gz 12 | remote_src: yes 13 | dest: /tmp/ 14 | 15 | - name: cp binaries under $PATH 16 | copy: 17 | src: /tmp/{{ etcd_basename }}/{{ item }} 18 | remote_src: true 19 | dest: /usr/bin/{{ item }} 20 | mode: '0755' 21 | with_items: 22 | - etcd 23 | - etcdctl 24 | -------------------------------------------------------------------------------- /roles/etcd/tasks/etcd_config.yml: -------------------------------------------------------------------------------- 1 | - name: ensure etcd config dir 2 | file: 3 | path: /etc/etcd/ 4 | state: directory 5 | 6 | - name: ensure etcd data dir 7 | file: 8 | path: /var/lib/etcd/ 9 | state: directory 10 | 11 | - name: template etcd config 12 | template: 13 | src: etcd.conf.j2 14 | dest: /etc/etcd/etcd.conf 15 | 16 | - name: template systemd unit file 17 | template: 18 | src: etcd.service.j2 19 | dest: /etc/systemd/system/etcd.service 20 | -------------------------------------------------------------------------------- /roles/etcd/tasks/etcd_launch.yml: -------------------------------------------------------------------------------- 1 | - name: start etcd by systemd 2 | systemd: 3 | name: etcd 4 
| daemon_reload: yes 5 | state: started 6 | enabled: yes 7 | masked: no 8 | -------------------------------------------------------------------------------- /roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: etcd_binary.yml 2 | tags: 3 | - etcd_binary 4 | 5 | - include: etcd_config.yml 6 | tags: 7 | - etcd_config 8 | 9 | - include: etcd_launch.yml 10 | tags: 11 | - etcd_launch 12 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd.conf.j2: -------------------------------------------------------------------------------- 1 | name: {{ etcd_name }} 2 | data-dir: /var/lib/etcd 3 | listen-peer-urls: http://0.0.0.0:2380 4 | listen-client-urls: http://0.0.0.0:2379 5 | initial-advertise-peer-urls: http://{{ inventory_hostname }}:2380 6 | advertise-client-urls: http://{{ inventory_hostname }}:2379 7 | initial-cluster: {% for host in groups['etcd'] %}{{ hostvars[host].etcd_name }}=http://{{ host }}:2380{% if not loop.last %},{% endif %}{% endfor %} 8 | 9 | initial-cluster-token: eru 10 | initial-cluster-state: new 11 | 12 | auto-compaction-retention: "1" 13 | quota-backend-bytes: -1 14 | enable-v2: true 15 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd key-value store 3 | Documentation=https://github.com/etcd-io/etcd 4 | After=network.target 5 | 6 | [Service] 7 | User=root 8 | Type=notify 9 | ExecStart=/usr/bin/etcd --config-file /etc/etcd/etcd.conf 10 | Restart=always 11 | RestartSec=10s 12 | LimitNOFILE=40000 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/etcd/vars/main.yml: -------------------------------------------------------------------------------- 1 | 
etcd_basename: etcd-{{ etcd_version }}-linux-amd64 2 | -------------------------------------------------------------------------------- /roles/node-docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | allow_duplicates: no 2 | dependencies: 3 | - role: docker 4 | - role: calico 5 | -------------------------------------------------------------------------------- /roles/node-docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include: node_docker_kernel.yml 2 | tags: node_docker_kernel 3 | 4 | - include: node_docker_cnm.yml 5 | tags: 6 | - node_docker_cnm 7 | 8 | - include: node_docker_register.yml 9 | tags: 10 | - node_docker_register 11 | -------------------------------------------------------------------------------- /roles/node-docker/tasks/node_docker_cnm.yml: -------------------------------------------------------------------------------- 1 | - name: download barrel binary 2 | get_url: 3 | url: https://github.com/projecteru2/barrel/releases/download/v21.01.30/barrel_21.01.30_Linux_x86_64.tar.gz 4 | dest: /tmp/barrel.tar.gz 5 | force: yes 6 | 7 | - name: extract barrel binary 8 | unarchive: 9 | src: /tmp/barrel.tar.gz 10 | dest: /usr/bin/ 11 | 12 | - name: configuring barrel 13 | template: 14 | src: barrel.conf.j2 15 | dest: /etc/eru/barrel.conf 16 | 17 | - name: barrel systemd unit file 18 | template: 19 | src: barrel.service.j2 20 | dest: /etc/systemd/system/barrel.service 21 | 22 | - name: start barrel service 23 | systemd: 24 | name: barrel 25 | daemon_reload: yes 26 | state: started 27 | enabled: yes 28 | masked: no 29 | 30 | - name: register barrel to docker 31 | shell: docker network create --driver calico --ipam-driver calico-ipam --subnet {{ calico_ippool_cidr }} {{ calico_ippool_name }} 32 | register: res 33 | failed_when: res.rc != 0 and 'already exists' not in res.stderr 34 | 
-------------------------------------------------------------------------------- /roles/node-docker/tasks/node_docker_kernel.yml: -------------------------------------------------------------------------------- 1 | - name: disable ipv6 2 | sysctl: 3 | name: "{{ item }}" 4 | value: '1' 5 | sysctl_set: yes 6 | state: present 7 | reload: yes 8 | loop: 9 | - net.ipv6.conf.all.disable_ipv6 10 | - net.ipv6.conf.default.disable_ipv6 11 | -------------------------------------------------------------------------------- /roles/node-docker/tasks/node_docker_register.yml: -------------------------------------------------------------------------------- 1 | - name: eru add docker pod 2 | delegate_to: "{{ core_host }}" 3 | shell: eru-cli pod add docker 4 | 5 | - name: eru add node 6 | delegate_to: "{{ core_host }}" 7 | shell: eru-cli node add --nodename {{ node_docker_name }} --endpoint tcp://{{ inventory_hostname }}:2377 docker 8 | register: res 9 | failed_when: res.rc != 0 and 'Key exists' not in res.stderr 10 | 11 | - name: mkdir /etc/eru 12 | file: 13 | path: /etc/eru/ 14 | state: directory 15 | 16 | - name: render eru agent 17 | template: 18 | src: eru-agent.yaml.j2 19 | dest: /etc/eru/agent.yaml 20 | 21 | - name: run eru agent 22 | delegate_to: "{{ core_host }}" 23 | shell: eru-cli workload deploy --pod docker --node {{ node_docker_name }} --entry agent --file /etc/eru/agent.yaml:/agent.yaml --network host --image projecteru2/agent --cpu-request 0 --memory-request 0 --memory-limit 0 --env ERU_HOSTNAME={{ node_docker_name }} https://raw.githubusercontent.com/projecteru2/agent/master/spec.yaml 24 | -------------------------------------------------------------------------------- /roles/node-docker/templates/barrel.conf.j2: -------------------------------------------------------------------------------- 1 | ETCD_ENDPOINTS=http://127.0.0.1:2379 2 | BARREL_DOCKERD_PATH=unix:///var/run/docker.sock 3 | BARREL_HOSTS=unix:///var/run/barrel.sock,http://0.0.0.0:2377 4 | HOSTNAME={{ 
node_calico_name }} 5 | -------------------------------------------------------------------------------- /roles/node-docker/templates/barrel.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Eru Docker Proxy 3 | After=network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | Before=docker.service 7 | 8 | [Service] 9 | Type=simple 10 | # set GOMAXPROCS to number of processors 11 | EnvironmentFile=/etc/eru/barrel.conf 12 | ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/eru-barrel" 13 | Restart=on-failure 14 | LimitNOFILE=65536 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /roles/node-docker/templates/eru-agent.yaml.j2: -------------------------------------------------------------------------------- 1 | pid: /tmp/agent.pid 2 | 3 | health_check_interval: 5 4 | health_check_timeout: 10 5 | core: {{ groups['core'] | first }}:5001 6 | 7 | etcd: 8 | machines: 9 | - http://127.0.0.1:2379 10 | 11 | docker: 12 | endpoint: tcp://127.0.0.1:2376 13 | metrics: 14 | step: 30 15 | transfers: 16 | - 127.0.0.1:8125 17 | api: 18 | addr: 127.0.0.1:12345 19 | log: 20 | forwards: 21 | - tcp://127.0.0.1:5411 22 | stdout: False 23 | -------------------------------------------------------------------------------- /roles/node-docker/vars/main.yml: -------------------------------------------------------------------------------- 1 | yavirt_image: harbor.shopeemobile.com/cloud/yavirt:{{ yavirt_image_tag }} 2 | -------------------------------------------------------------------------------- /roles/node-yavirt/meta/main.yml: -------------------------------------------------------------------------------- 1 | allow_duplicates: no 2 | dependencies: 3 | - role: essential 4 | - role: docker 5 | -------------------------------------------------------------------------------- /roles/node-yavirt/tasks/main.yml: 
--------------------------------------------------------------------------------
# Entry point for the node-yavirt role. import_tasks replaces the deprecated
# bare `include` (removed in recent ansible-core) while keeping the old static
# behaviour: tags set here propagate to every task in the imported file.
- import_tasks: node_yavirt_deps.yml
  tags:
    - node_yavirt_deps

- import_tasks: node_yavirt_binary.yml
  tags:
    - node_yavirt_binary

- import_tasks: node_yavirt_config.yml
  tags:
    - node_yavirt_config

- import_tasks: node_yavirt_launch.yml
  tags:
    - node_yavirt_launch

- import_tasks: node_yavirt_register.yml
  tags:
    - node_yavirt_register
--------------------------------------------------------------------------------
/roles/node-yavirt/tasks/node_yavirt_binary.yml:
--------------------------------------------------------------------------------
# Obtain the yavirt binaries by pulling the release image and copying them
# out of a throwaway container.

- name: pull yavirt image
  shell: docker pull {{ yavirt_image }}

- name: mkdir yavirt dir
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - /opt/yavirtd/
    - /opt/yavirtd/log
    - "{{ yavirt_bin }}"
    - "{{ yavirt_run }}"

- name: extract yavirt binary
  # No -it: Ansible tasks run without a TTY, so `docker run -it` fails with
  # "the input device is not a TTY"; plain `cp` needs neither stdin nor a tty.
  shell: docker run --rm --user root -v {{ yavirt_bin }}:/data {{ yavirt_image }} cp {{ item }} /data/
  loop:
    - /usr/local/bin/yavirtd
    - /usr/local/bin/yavirtctl
--------------------------------------------------------------------------------
/roles/node-yavirt/tasks/node_yavirt_config.yml:
--------------------------------------------------------------------------------
- name: render yavirt config
  template:
    src: yavirtd.toml.j2
    dest: "{{ yavirt_run }}/yavirtd.toml"

- name: yavirtctl add host
  # memory: explicit * 1000 instead of the original string-appended "000".
  # storage: size_total of the root mount, selected via jmespath
  # (json_query requires the jmespath python package; see README).
  shell: "{{ yavirt_bin }}/yavirtctl --config {{ yavirt_run }}/yavirtd.toml host add --cpu {{ ansible_processor_count }} --memory {{ ansible_memory_mb.real.total * 1000 }} --storage {{ ansible_mounts | json_query(root_space_query) | first }} --subnet 127.0.0.1 --network calico {{ node_yavirt_name }}"
  vars:
    root_space_query: "[?mount=='/'].size_total"

- name: render yavirt service unit file
  template:
    src: yavirtd.service.j2
    dest: /etc/systemd/system/yavirtd.service
--------------------------------------------------------------------------------
/roles/node-yavirt/tasks/node_yavirt_deps.yml:
--------------------------------------------------------------------------------
- name: install deps
  # Pass the whole package list to apt in one call instead of looping: a
  # single transaction lets apt resolve dependencies together and is much
  # faster than one dpkg run per package.
  apt:
    update_cache: yes
    allow_unauthenticated: yes
    name:
      - bridge-utils
      - qemu
      - qemu-kvm
      - libnss-libvirt
      - libvirt0
      - libvirt-clients
      - libvirt-daemon
      - libvirt-daemon-driver-storage-gluster
      - libvirt-daemon-driver-storage-rbd
      - libvirt-daemon-driver-storage-sheepdog
      - libvirt-daemon-driver-storage-zfs
      - libvirt-daemon-system
      - libvirt-dev
      - libvirt-doc
      - libvirt-sanlock
      - libvirt-wireshark
      - libguestfs-tools
      - libguestfs-dev

- name: launch libvirtd
  systemd:
    name: libvirtd
    daemon_reload: yes
    state: started
    enabled: yes
    masked: no
--------------------------------------------------------------------------------
/roles/node-yavirt/tasks/node_yavirt_launch.yml:
--------------------------------------------------------------------------------
- name: launch yavirtd
  systemd:
    name: yavirtd
    # daemon_reload so systemd picks up the unit file freshly rendered by
    # node_yavirt_config.yml (same pattern as the libvirtd launch task).
    daemon_reload: yes
    state: started
    enabled: yes
--------------------------------------------------------------------------------
/roles/node-yavirt/tasks/node_yavirt_register.yml:
--------------------------------------------------------------------------------
# Register this host as a yavirt node with the eru core.

- name: eru add yavirt pod
  delegate_to: "{{ core_host }}"
  shell: eru-cli pod add virt

- name: eru add node
  delegate_to: "{{ core_host }}"
  shell: eru-cli node add --nodename {{ node_yavirt_name }} --endpoint virt-grpc://{{ inventory_hostname }}:9697 virt
  register: res
  # Idempotency: re-registering an existing node reports 'Key exists'.
  failed_when: res.rc != 0 and 'Key exists' not in res.stderr
--------------------------------------------------------------------------------
/roles/node-yavirt/templates/yavirtd.service.j2:
--------------------------------------------------------------------------------
[Unit]
Description=yavirtd - Yet another virt. daemon
After=network.target
# Wants= alone gives no ordering; pair it with After= so yavirtd actually
# waits for network-online.target.
After=network-online.target
Wants=network-online.target

[Service]
User=root
PermissionsStartOnly=true
Environment=ETCD_ENDPOINTS=http://{{ core_host }}:2379
Environment=HOSTNAME={{ node_yavirt_name }}
# Use the role vars instead of hard-coding /opt/yavirt-deploy/{bin,run}, so
# the unit stays in sync with yavirt_bin / yavirt_run in the role's vars.
ExecStart={{ yavirt_bin }}/yavirtd {{ yavirt_run }}/yavirtd.toml
Restart=on-abnormal
RestartSec=10s

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/roles/node-yavirt/templates/yavirtd.toml.j2:
--------------------------------------------------------------------------------
virt_dir = "/opt/yavirtd"
virt_bridge = "yavirbr0"

log_file = "/opt/yavirtd/log/yavirtd.log"

calico_pools = ["{{ calico_ippool_name }}"]

etcd_prefix = "/yavirt-dev/v1"
# Jinja renders the split list as ['a', 'b']; single-quoted items are valid
# TOML literal strings, so this produces a proper TOML array.
etcd_endpoints = {{ etcd_endpoints.split(',') }}

core_addr = "{{ core_host }}:5001"
core_username = ""
core_password = ""
core_status_check_interval = "64s"
--------------------------------------------------------------------------------
/roles/node-yavirt/vars/main.yml:
--------------------------------------------------------------------------------
yavirt_image: harbor.shopeemobile.com/cloud/yavirt:{{ yavirt_image_tag }}
yavirt_bin: /opt/yavirt-deploy/bin
yavirt_run: /opt/yavirt-deploy/run
# Node name as registered with eru: inventory hostname with dots replaced by
# dashes.
node_yavirt_name: "{{ inventory_hostname | regex_replace('\\.', '-') }}"
--------------------------------------------------------------------------------