├── .gitignore ├── LICENSE.md ├── README.md ├── defaults └── main.yml ├── handlers └── main.yml ├── meta └── main.yml ├── tasks ├── lighthouse.yml ├── main.yml ├── nebula.yml ├── node.yml ├── preflight.yml └── uninstall.yml ├── templates ├── lighthouse.service.j2 ├── lighthouse_config.yml.j2 ├── nebula-check.sh.j2 ├── node.service.j2 └── node_config.yml.j2 └── vars └── main.yml /.gitignore: -------------------------------------------------------------------------------- 1 | # ssh keys 2 | ansible_* 3 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Andrew Paglusch 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role for Nebula 2 | 3 | Quickly and easily deploy the [Nebula Overlay VPN](https://github.com/slackhq/nebula) software onto all of your hosts. 4 | 5 | # What Is Nebula 6 | 7 | > Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. It lets you seamlessly connect computers anywhere in the world. 8 | 9 | You can read more about Nebula [on the official repo](https://github.com/slackhq/nebula) 10 | 11 | # Example Playbook 12 | ``` 13 | --- 14 | - name: Deploy Nebula 15 | hosts: all 16 | gather_facts: yes 17 | user: ansible 18 | become: yes 19 | vars: 20 | nebula_version: 1.8.0 21 | nebula_network_name: "Company Nebula Mgmt Net" 22 | nebula_network_cidr: 16 23 | 24 | nebula_lighthouse_internal_ip_addr: 10.43.0.1 25 | nebula_lighthouse_public_hostname: lighthouse.company.com 26 | nebula_lighthouse_public_port: 4242 27 | 28 | nebula_firewall_block_action: reject 29 | 30 | nebula_inbound_rules: 31 | - { port: "any", proto: "icmp", host: "any" } 32 | - { port: 22, proto: "tcp", host: "any" } 33 | nebula_outbound_rules: 34 | - { port: "any", proto: "any", host: "any" } 35 | 36 | roles: 37 | - role: nebula 38 | ``` 39 | 40 | # Example Inventory 41 | ``` 42 | [nebula_lighthouse] 43 | lighthouse01.company.com 44 | 45 | [servers] 46 | web01.company.com nebula_internal_ip_addr=10.43.0.2 47 | docker01.company.com nebula_internal_ip_addr=10.43.0.3 48 | zabbix01.company.com nebula_internal_ip_addr=10.43.0.4 49 | backup01.company.com nebula_internal_ip_addr=10.43.0.5 50 | pbx01.company.com nebula_internal_ip_addr=10.43.0.6 51 | ``` 52 | 53 | **Note:** More variables can be found in the [role defaults.](defaults/main.yml) 54 | 55 | # Running the Playbook 56 | ``` 57 | ansible-playbook -i inventory nebula.yml 58 | ``` 59 | 
-------------------------------------------------------------------------------- /defaults/main.yml: -------------------------------------------------------------------------------- 1 | nebula_version: 1.8.0 2 | nebula_network_name: "My Nebula Mesh Network" 3 | nebula_network_cidr: 24 4 | nebula_ca_cert_duration: "87600h0m0s" #10 years 5 | nebula_client_cert_duration: "43800h0m0s" #5 years 6 | nebula_clean_install: false 7 | nebula_lighthouse_build_hosts_file: true 8 | nebula_node_lighthouse_in_hosts_file: true 9 | nebula_node_use_lighthouse_as_relay: true 10 | nebula_install_check_cron: true 11 | 12 | 13 | nebula_lighthouse_hostname: lighthouse 14 | nebula_lighthouse_internal_ip_addr: 192.168.77.1 15 | nebula_lighthouse_public_hostname: my-nebula-server.com 16 | nebula_lighthouse_public_port: 4242 17 | nebula_lighthouse_is_relay: true 18 | nebula_lighthouse_extra_config: {} 19 | 20 | nebula_metrics_prometheus_enabled: false 21 | nebula_metrics_prometheus_listen: "127.0.0.1:4244" 22 | nebula_metrics_prometheus_path: "/metrics" 23 | nebula_metrics_prometheus_namespace: nebula 24 | nebula_metrics_prometheus_interval: 10s 25 | 26 | nebula_firewall_block_action: drop 27 | 28 | nebula_inbound_rules: 29 | - { port: "any", proto: "any", host: "any" } 30 | nebula_outbound_rules: 31 | - { port: "any", proto: "any", host: "any" } 32 | 33 | -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart Nebula 2 | systemd: 3 | name: nebula 4 | state: restarted 5 | when: inventory_hostname not in groups['nebula_lighthouse'] 6 | listen: "restart nebula" 7 | 8 | - name: Restart Lighthouse 9 | systemd: 10 | name: lighthouse 11 | state: restarted 12 | when: inventory_hostname in groups['nebula_lighthouse'] 13 | listen: "restart nebula" 14 | -------------------------------------------------------------------------------- /meta/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | 4 | galaxy_info: 5 | role_name: nebula 6 | author: AndrewPaglusch 7 | description: Nebula Overlay Mesh VPN 8 | license: "MIT" 9 | min_ansible_version: 2.4 10 | galaxy_tags: 11 | - nebula 12 | - vpn 13 | - network 14 | - overlay 15 | - mesh 16 | -------------------------------------------------------------------------------- /tasks/lighthouse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure CA cert/key exists 3 | command: 4 | chdir: /opt/nebula 5 | cmd: ./nebula-cert ca -name "{{ nebula_network_name }}" -duration "{{ nebula_ca_cert_duration }}" 6 | creates: /opt/nebula/ca.crt 7 | 8 | - name: Ensure lighthouse cert/key exists 9 | command: 10 | chdir: /opt/nebula 11 | cmd: ./nebula-cert sign -name "{{ nebula_lighthouse_hostname }}" -ip "{{ nebula_lighthouse_internal_ip_addr }}/{{ nebula_network_cidr }}" -duration "{{ nebula_client_cert_duration }}" 12 | creates: "/opt/nebula/{{ nebula_lighthouse_hostname }}.crt" 13 | 14 | - name: Ensure lighthouse is configured 15 | template: 16 | src: lighthouse_config.yml.j2 17 | dest: /opt/nebula/config.yml 18 | owner: root 19 | group: root 20 | mode: '0400' 21 | notify: restart nebula 22 | 23 | - name: Ensure lighthouse service exists 24 | template: 25 | src: lighthouse.service.j2 26 | dest: /etc/systemd/system/lighthouse.service 27 | owner: root 28 | group: root 29 | mode: '0644' 30 | 31 | - name: Ensure lighthouse service is enabled and running 32 | systemd: 33 | name: lighthouse 34 | daemon_reload: yes 35 | enabled: yes 36 | masked: no 37 | state: started 38 | -------------------------------------------------------------------------------- /tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Nebula Install 3 | block: 4 | - name: Uninstall Nebula (clean install) 5 | include_tasks: 
uninstall.yml 6 | when: nebula_clean_install|bool 7 | 8 | - name: Preflight checks 9 | include_tasks: preflight.yml 10 | 11 | - name: Install Nebula on all hosts 12 | include_tasks: nebula.yml 13 | 14 | - name: Deploy Lighthouse 15 | include_tasks: lighthouse.yml 16 | when: inventory_hostname in groups['nebula_lighthouse'] 17 | 18 | - name: Deploy Nebula Node 19 | include_tasks: node.yml 20 | when: inventory_hostname not in groups['nebula_lighthouse'] 21 | when: inventory_hostname in groups['nebula_lighthouse'] or nebula_internal_ip_addr is defined 22 | -------------------------------------------------------------------------------- /tasks/nebula.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure /opt/nebula directory exists 2 | file: 3 | path: /opt/nebula 4 | state: directory 5 | mode: '0700' 6 | owner: root 7 | group: root 8 | 9 | - name: Check for existing Nebula install 10 | stat: 11 | path: '/opt/nebula/nebula' 12 | register: installed_nebula_stats 13 | 14 | - name: Get Nebula version (if installed) 15 | command: "/opt/nebula/nebula -version" 16 | register: installed_nebula_version_out 17 | changed_when: False 18 | failed_when: False 19 | when: installed_nebula_stats.stat.exists 20 | 21 | - name: Extract Nebula version from command output 22 | set_fact: 23 | installed_nebula_version: "{{ installed_nebula_version_out.stdout.split(' ')[1] }}" 24 | when: installed_nebula_stats.stat.exists 25 | 26 | - name: Download & Extract Nebula 27 | unarchive: 28 | src: "https://github.com/slackhq/nebula/releases/download/v{{ nebula_version }}/nebula-linux-{{ nebula_architectures[ansible_architecture] }}.tar.gz" 29 | dest: "/opt/nebula" 30 | remote_src: yes 31 | when: (installed_nebula_version|default(nebula_version) != nebula_version) or (not installed_nebula_stats.stat.exists) 32 | notify: restart nebula 33 | 34 | - name: Ensure Nebula binaries permissions are correct 35 | file: 36 | path: "/opt/nebula/{{ item }}" 37 | 
owner: root 38 | group: root 39 | mode: '0700' 40 | with_items: 41 | - nebula 42 | - nebula-cert 43 | -------------------------------------------------------------------------------- /tasks/node.yml: -------------------------------------------------------------------------------- 1 | - name: Check if node certificate exists on lighthouse 2 | stat: 3 | path: /opt/nebula/{{ inventory_hostname }}.crt 4 | delegate_to: "{{ groups.nebula_lighthouse[0] }}" 5 | register: cert_stat 6 | 7 | - name: Get information about existing certificate (if it exists) 8 | command: "/opt/nebula/nebula-cert print -json -path /opt/nebula/{{ inventory_hostname }}.crt" 9 | delegate_to: "{{ groups.nebula_lighthouse[0] }}" 10 | changed_when: false 11 | when: cert_stat.stat.exists 12 | register: current_cert_json 13 | ignore_errors: yes 14 | 15 | - name: Parse the IP address from the certificate details (if it exists) 16 | set_fact: 17 | current_cert_ip: "{{ current_cert_json.stdout | from_json | json_query('details.ips[0]') }}" 18 | when: 19 | - cert_stat.stat.exists 20 | - current_cert_json.stdout != "" 21 | 22 | - name: Print IP address from cert (if one exists) 23 | debug: 24 | msg: "IP Address in Cert: {{ current_cert_ip }}, Expected IP Address: {{ nebula_internal_ip_addr }}/{{ nebula_network_cidr }}" 25 | when: cert_stat.stat.exists 26 | 27 | - name: Delete invalid node certificate and key from lighthouse (wrong IP address) 28 | file: 29 | path: "/opt/nebula/{{ item }}" 30 | state: absent 31 | delegate_to: "{{ groups.nebula_lighthouse[0] }}" 32 | with_items: 33 | - "{{ inventory_hostname }}.crt" 34 | - "{{ inventory_hostname }}.key" 35 | when: 36 | - cert_stat.stat.exists 37 | - current_cert_ip != nebula_internal_ip_addr|string + '/' + nebula_network_cidr|string 38 | 39 | - name: Ensure a cert/key exists for each node on lighthouse 40 | command: 41 | chdir: /opt/nebula 42 | cmd: ./nebula-cert sign -name "{{ inventory_hostname }}" -ip "{{ nebula_internal_ip_addr }}/{{ nebula_network_cidr 
}}" -duration "{{ nebula_client_cert_duration }}" 43 | delegate_to: "{{ groups.nebula_lighthouse[0] }}" 44 | when: not cert_stat.stat.exists or current_cert_ip != nebula_internal_ip_addr|string + '/' + nebula_network_cidr|string 45 | 46 | - name: Ensure lighthouse has hosts file entry for node 47 | lineinfile: 48 | path: /etc/hosts 49 | line: "{{ nebula_internal_ip_addr }} {{ inventory_hostname }}.neb" 50 | delegate_to: "{{ groups.nebula_lighthouse[0] }}" 51 | when: nebula_lighthouse_build_hosts_file 52 | 53 | - name: Ensure node has hosts file entry for lighthouse 54 | lineinfile: 55 | path: /etc/hosts 56 | line: "{{ nebula_lighthouse_internal_ip_addr }} {{ nebula_lighthouse_hostname }}.neb" 57 | when: nebula_node_lighthouse_in_hosts_file 58 | 59 | - name: Read cert/key from lighthouse 60 | slurp: 61 | src: "/opt/nebula/{{ item }}" 62 | register: lighthouse_files 63 | delegate_to: "{{ groups.nebula_lighthouse[0] }}" 64 | with_items: 65 | - "{{ inventory_hostname }}.crt" 66 | - "{{ inventory_hostname }}.key" 67 | - ca.crt 68 | 69 | - name: Ensure Cert, Key, CA files exist 70 | copy: 71 | dest: "/opt/nebula/{{ item['item'] }}" 72 | content: "{{ item['content'] | b64decode }}" 73 | owner: root 74 | group: root 75 | mode: 0600 76 | loop: "{{ lighthouse_files.results }}" 77 | loop_control: 78 | label: "{{ item['item'] }}" 79 | 80 | - name: Ensure Nebula is configured 81 | template: 82 | src: node_config.yml.j2 83 | dest: /opt/nebula/config.yml 84 | owner: root 85 | group: root 86 | mode: '0400' 87 | notify: restart nebula 88 | 89 | - name: Ensure Nebula service exists 90 | template: 91 | src: node.service.j2 92 | dest: /etc/systemd/system/nebula.service 93 | owner: root 94 | group: root 95 | mode: '0644' 96 | 97 | - name: Ensure Nebula service is enabled and running 98 | systemd: 99 | name: nebula 100 | daemon_reload: yes 101 | enabled: yes 102 | masked: no 103 | state: started 104 | 105 | - name: Ensure nebula-check is present 106 | template: 107 | src: 
nebula-check.sh.j2 108 | dest: /opt/nebula/nebula-check.sh 109 | owner: root 110 | group: root 111 | mode: '0755' 112 | when: nebula_install_check_cron|bool 113 | 114 | - name: Ensure nebula-check is scheduled via cron 115 | cron: 116 | name: "nebula-check" 117 | minute: "{{ nebula_check_cron_minute | default('*/5') }}" 118 | job: "/opt/nebula/nebula-check.sh" 119 | when: nebula_install_check_cron|bool 120 | 121 | -------------------------------------------------------------------------------- /tasks/preflight.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Preflight checks 3 | block: 4 | - name: Collecting all nebula_internal_ip_addr values 5 | set_fact: 6 | sorted_ips: >- 7 | {{ hostvars 8 | | dict2items 9 | | selectattr('value.nebula_internal_ip_addr', 'defined') 10 | | map(attribute='value.nebula_internal_ip_addr') 11 | | list 12 | | sort 13 | }} 14 | 15 | - name: Initialize duplicated_ips list 16 | set_fact: 17 | duplicated_ips: [] 18 | 19 | - name: Looking for duplicate IP addresses 20 | set_fact: 21 | duplicated_ips: "{{ duplicated_ips + [item] }}" 22 | loop: "{{ sorted_ips | unique }}" 23 | when: "sorted_ips | select('equalto', item) | list | length > 1" 24 | 25 | - name: Fail if duplicate IP addresses are found 26 | fail: 27 | msg: "You have one or more hosts with duplicate IP addresses assigned: {{ duplicated_ips }}" 28 | when: duplicated_ips | length > 0 29 | run_once: true 30 | -------------------------------------------------------------------------------- /tasks/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove Nebula Lighthouse 3 | block: 4 | - name: Stop lighthouse service 5 | systemd: 6 | name: lighthouse 7 | state: stopped 8 | daemon_reload: yes 9 | enabled: no 10 | ignore_errors: true 11 | 12 | - name: Remove lighthouse systemd service 13 | file: 14 | path: /etc/systemd/system/lighthouse.service 15 | state: absent 16 
| when: inventory_hostname in groups['nebula_lighthouse'] 17 | 18 | - name: Remove Nebula nodes 19 | block: 20 | - name: Stop nebula service 21 | systemd: 22 | name: nebula 23 | state: stopped 24 | daemon_reload: yes 25 | enabled: no 26 | ignore_errors: true 27 | 28 | - name: Remove nebula systemd service 29 | file: 30 | path: /etc/systemd/system/nebula.service 31 | state: absent 32 | when: inventory_hostname not in groups['nebula_lighthouse'] 33 | 34 | - name: Remove Nebula installation directory 35 | file: 36 | path: /opt/nebula 37 | state: absent 38 | 39 | - name: Remove Nebula check from cron 40 | cron: 41 | name: "nebula-check" 42 | state: absent 43 | 44 | - name: Clear .neb entries from /etc/hosts 45 | replace: 46 | path: /etc/hosts 47 | backup: yes 48 | regexp: '^.+\.neb$\n' 49 | -------------------------------------------------------------------------------- /templates/lighthouse.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nebula overlay networking tool - Lighthouse 3 | Wants=basic.target network-online.target nss-lookup.target time-sync.target 4 | After=basic.target network.target network-online.target 5 | Before=sshd.service 6 | 7 | [Service] 8 | Type=notify 9 | NotifyAccess=main 10 | SyslogIdentifier=nebula 11 | ExecReload=/bin/kill -HUP $MAINPID 12 | ExecStart=/opt/nebula/nebula -config /opt/nebula/config.yml 13 | Restart=always 14 | RestartSec=42s 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | 19 | -------------------------------------------------------------------------------- /templates/lighthouse_config.yml.j2: -------------------------------------------------------------------------------- 1 | pki: 2 | # every node needs a copy of ca.crt, .key, 3 | # and .crt 4 | ca: /opt/nebula/ca.crt 5 | cert: /opt/nebula/{{ nebula_lighthouse_hostname }}.crt 6 | key: /opt/nebula/{{ nebula_lighthouse_hostname }}.key 7 | 8 | static_host_map: 9 | # how to find one or more 
lighthouse nodes 10 | # you do NOT need every node to be listed here! 11 | # Similar to "trackers" for torrents 12 | # 13 | # format "": [":[port] or :[port]"] 14 | # 15 | "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"] 16 | 17 | lighthouse: 18 | interval: 60 19 | 20 | # if you're a lighthouse, say you're a lighthouse 21 | # 22 | am_lighthouse: true 23 | 24 | hosts: 25 | # If you're a lighthouse, this section should be EMPTY 26 | # or commented out. If you're NOT a lighthouse, list 27 | # lighthouse nodes here, one per line, in the following 28 | # format: 29 | # 30 | # - "192.168.77.1" 31 | {% if nebula_lighthouse_extra_config|length > 0 %} 32 | {{- nebula_lighthouse_extra_config | to_nice_yaml | indent(2) }} 33 | {% endif %} 34 | 35 | listen: 36 | # 0.0.0.0 means "all interfaces," which is probably what you want 37 | # 38 | host: 0.0.0.0 39 | port: {{ nebula_lighthouse_public_port }} 40 | 41 | # "punchy" basically means "send frequent keepalive packets" 42 | # so that your router won't expire and close your NAT tunnels. 43 | # 44 | punchy: true 45 | 46 | # "punch_back" allows the other node to try punching out to you, 47 | # if you're having trouble punching out to it. Useful for stubborn 48 | # networks with symmetric NAT, etc. 49 | # 50 | punch_back: true 51 | 52 | relay: 53 | am_relay: {{ nebula_lighthouse_is_relay }} 54 | use_relays: false 55 | 56 | tun: 57 | # sensible defaults. don't monkey with these unless 58 | # you're CERTAIN you know what you're doing. 
59 | # 60 | dev: neb0 61 | drop_local_broadcast: false 62 | drop_multicast: false 63 | tx_queue: 500 64 | mtu: 1300 65 | routes: 66 | 67 | logging: 68 | level: info 69 | format: text 70 | 71 | {% if nebula_metrics_prometheus_enabled %} 72 | stats: 73 | type: prometheus 74 | listen: {{ nebula_metrics_prometheus_listen }} 75 | path: {{ nebula_metrics_prometheus_path }} 76 | namespace: {{ nebula_metrics_prometheus_namespace }} 77 | interval: {{ nebula_metrics_prometheus_interval }} 78 | {% endif %} 79 | 80 | # you NEED this firewall section. 81 | # 82 | # Nebula has its own firewall in addition to anything 83 | # your system has in place, and it's all default deny. 84 | # 85 | # So if you don't specify some rules here, you'll drop 86 | # all traffic, and curse and wonder why you can't ping 87 | # one node from another. 88 | # 89 | firewall: 90 | outbound_action: {{ nebula_firewall_block_action }} 91 | inbound_action: {{ nebula_firewall_block_action }} 92 | conntrack: 93 | tcp_timeout: 120h 94 | udp_timeout: 3m 95 | default_timeout: 10m 96 | max_connections: 100000 97 | 98 | # since everything is default deny, all rules you 99 | # actually SPECIFY here are allow rules. 100 | # 101 | 102 | outbound: 103 | {% for rule in nebula_outbound_rules %} 104 | - port: {{ rule.port }} 105 | proto: {{ rule.proto }} 106 | host: {{ rule.host }} 107 | {% endfor %} 108 | 109 | inbound: 110 | {% for rule in nebula_inbound_rules %} 111 | - port: {{ rule.port }} 112 | proto: {{ rule.proto }} 113 | host: {{ rule.host }} 114 | {% endfor %} 115 | -------------------------------------------------------------------------------- /templates/nebula-check.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | PATH=/usr/bin:/bin:/sbin:/usr/sbin 3 | test="$(ping -q -w10 -W2 {{ nebula_lighthouse_internal_ip_addr }} 2>/dev/null | grep -oP '\d{1,3}(?=%)')" 4 | if [ "$test" == "100" ]; then 5 | logger -s -p user.emerg '!!! 
Unable to reach Nebula server. Bouncing tunnel neb0... !!!' 6 | systemctl restart nebula.service 7 | fi 8 | -------------------------------------------------------------------------------- /templates/node.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nebula overlay networking tool 3 | Wants=basic.target network-online.target nss-lookup.target time-sync.target 4 | After=basic.target network.target network-online.target 5 | Before=sshd.service 6 | 7 | [Service] 8 | Type=notify 9 | NotifyAccess=main 10 | SyslogIdentifier=nebula 11 | ExecReload=/bin/kill -HUP $MAINPID 12 | ExecStart=/opt/nebula/nebula -config /opt/nebula/config.yml 13 | Restart=always 14 | RestartSec=42s 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /templates/node_config.yml.j2: -------------------------------------------------------------------------------- 1 | pki: 2 | # every node needs a copy of the CA certificate, 3 | # and its own certificate and key, ONLY. 4 | # 5 | ca: /opt/nebula/ca.crt 6 | cert: /opt/nebula/{{ inventory_hostname }}.crt 7 | key: /opt/nebula/{{ inventory_hostname }}.key 8 | 9 | static_host_map: 10 | # how to find one or more lighthouse nodes 11 | # you do NOT need every node to be listed here! 12 | # 13 | # format "Nebula IP": ["public IP or hostname:port"] 14 | # 15 | "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"] 16 | 17 | lighthouse: 18 | interval: 60 19 | 20 | # if you're a lighthouse, say you're a lighthouse 21 | # 22 | am_lighthouse: false 23 | 24 | hosts: 25 | # If you're a lighthouse, this section should be EMPTY 26 | # or commented out. 
If you're NOT a lighthouse, list 27 | # lighthouse nodes here, one per line, in the following 28 | # format: 29 | # 30 | - "{{ nebula_lighthouse_internal_ip_addr }}" 31 | 32 | listen: 33 | # 0.0.0.0 means "all interfaces," which is probably what you want 34 | # 35 | host: 0.0.0.0 36 | port: 4242 37 | 38 | # "punchy" basically means "send frequent keepalive packets" 39 | # so that your router won't expire and close your NAT tunnels. 40 | # 41 | punchy: true 42 | 43 | relay: 44 | am_relay: false 45 | use_relays: {{ nebula_node_use_lighthouse_as_relay }} 46 | relays: 47 | - {{ nebula_lighthouse_internal_ip_addr }} 48 | 49 | # "punch_back" allows the other node to try punching out to you, 50 | # if you're having trouble punching out to it. Useful for stubborn 51 | # networks with symmetric NAT, etc. 52 | # 53 | punch_back: true 54 | 55 | tun: 56 | # sensible defaults. don't monkey with these unless 57 | # you're CERTAIN you know what you're doing. 58 | # 59 | dev: neb0 60 | drop_local_broadcast: false 61 | drop_multicast: false 62 | tx_queue: 500 63 | mtu: 1300 64 | routes: 65 | 66 | logging: 67 | level: info 68 | format: text 69 | 70 | {% if nebula_metrics_prometheus_enabled %} 71 | stats: 72 | type: prometheus 73 | listen: {{ nebula_metrics_prometheus_listen }} 74 | path: {{ nebula_metrics_prometheus_path }} 75 | namespace: {{ nebula_metrics_prometheus_namespace }} 76 | interval: {{ nebula_metrics_prometheus_interval }} 77 | {% endif %} 78 | 79 | # you NEED this firewall section. 80 | # 81 | # Nebula has its own firewall in addition to anything 82 | # your system has in place, and it's all default deny. 83 | # 84 | # So if you don't specify some rules here, you'll drop 85 | # all traffic, and curse and wonder why you can't ping 86 | # one node from another. 
87 | # 88 | firewall: 89 | outbound_action: {{ nebula_firewall_block_action }} 90 | inbound_action: {{ nebula_firewall_block_action }} 91 | conntrack: 92 | tcp_timeout: 120h 93 | udp_timeout: 3m 94 | default_timeout: 10m 95 | max_connections: 100000 96 | 97 | # since everything is default deny, all rules you 98 | # actually SPECIFY here are allow rules. 99 | # 100 | 101 | outbound: 102 | {% for rule in nebula_outbound_rules %} 103 | - port: {{ rule.port }} 104 | proto: {{ rule.proto }} 105 | host: {{ rule.host }} 106 | {% endfor %} 107 | 108 | inbound: 109 | {% for rule in nebula_inbound_rules %} 110 | - port: {{ rule.port }} 111 | proto: {{ rule.proto }} 112 | host: {{ rule.host }} 113 | {% endfor %} 114 | -------------------------------------------------------------------------------- /vars/main.yml: -------------------------------------------------------------------------------- 1 | nebula_architectures: 2 | x86_64: "amd64" 3 | armv7l: "arm-7" 4 | aarch64: "arm64" 5 | --------------------------------------------------------------------------------