├── infra ├── ansible │ ├── inventory.yaml │ ├── roles │ │ ├── dns_telegraf │ │ │ ├── files │ │ │ │ └── journald.d_conf.yaml │ │ │ ├── tasks │ │ │ │ ├── telegraf_knot.yaml │ │ │ │ ├── datadog.yaml │ │ │ │ └── main.yaml │ │ │ └── templates │ │ │ │ └── telegraf.conf.j2 │ │ ├── knot_recursive │ │ │ ├── templates │ │ │ │ ├── certbot_update_cert.j2 │ │ │ │ ├── tsig.ini.j2 │ │ │ │ ├── certbot.sh.j2 │ │ │ │ └── kresd.conf.j2 │ │ │ └── tasks │ │ │ │ ├── doh.yaml │ │ │ │ └── main.yaml │ │ ├── knot_authoritative │ │ │ ├── files │ │ │ │ ├── cznic-labs-knot-dns.list │ │ │ │ └── knot.apt.preferences.txt │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ └── templates │ │ │ │ └── knot.conf.j2 │ │ ├── dns_ospf │ │ │ ├── templates │ │ │ │ ├── netplan_dummy0.yaml.j2 │ │ │ │ ├── netplan_dummy1.yaml.j2 │ │ │ │ ├── netplan_dummy2.yaml.j2 │ │ │ │ ├── netplan_dummy3.yaml.j2 │ │ │ │ ├── netplan_50_cloud_init.yaml.j2 │ │ │ │ └── iptables.j2 │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── support_account │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── requirements.yml │ │ └── dns_ssh │ │ │ └── tasks │ │ │ └── main.yaml │ ├── .ansible-lint-ignore │ ├── ansible.cfg │ └── dns_server.yaml └── terraform │ ├── mesh_dns_servers │ ├── provider.tf │ ├── vm_recursive.tf │ ├── vm_authoritative.tf │ ├── ansible.tf │ └── vars.tf │ ├── main.tf │ ├── dev_jon.tfvars │ ├── prod_sn11.tfvars │ ├── prod_sn10.tfvars │ ├── prod_sn3.tfvars │ ├── dns.tf │ └── vars.tf ├── .gitignore ├── sld ├── vars.tf ├── provider.tf ├── README.md ├── records.themesh.nyc.tf ├── records.nycmeshconnect.com.tf ├── records.nycmeshconnect.net.tf ├── setup │ ├── one_time_setup.py │ └── README.md └── records.nycmesh.net.tf ├── ha.mesh.nycmesh.net.zone ├── airflow.mesh.nycmesh.net.zone ├── jenkins.mesh.nycmesh.net.zone ├── devairflow.mesh.nycmesh.net.zone ├── jamesinternalprodtwo.mesh.nycmesh.net.zone ├── prox.mesh.nycmesh.net.zone ├── doh.mesh.nycmesh.net.zone ├── nodensrecords.py ├── nycmeshconnect.com.zone ├── nycmeshconnect.net.zone ├── 
themesh.nyc.zone ├── .github ├── dependabot.yml └── workflows │ ├── deploy.yaml │ ├── pull_request.yaml │ ├── codeql.yml │ ├── sld_terraform.yaml │ ├── scorecard.yml │ └── deploy_dns_environment.yaml ├── deploy_knot.sh ├── generate_nn.py ├── README.md ├── makereverse.py ├── mesh.zone └── requirements.txt /infra/ansible/inventory.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | plugin: cloud.terraform.terraform_provider 3 | project_path: "../terraform" 4 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_telegraf/files/journald.d_conf.yaml: -------------------------------------------------------------------------------- 1 | logs: 2 | - type: journald 3 | path: /var/log/journal/ 4 | -------------------------------------------------------------------------------- /infra/ansible/.ansible-lint-ignore: -------------------------------------------------------------------------------- 1 | roles/dns_ospf/tasks/main.yaml no-changed-when 2 | roles/knot_authoritative/tasks/main.yaml no-changed-when 3 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_recursive/templates/certbot_update_cert.j2: -------------------------------------------------------------------------------- 1 | 53 {{ CERTBOT_UPDATE_HOUR }} * * 1 root bash /root/certbot.sh 2>&1 > /dev/null 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.arpa.zone 2 | *_ed25519 3 | *_ed25519.pub 4 | infra/terraform/.terraform/* 5 | infra/terraform/.terraform.lock.hcl 6 | .venv/ 7 | env.sh 8 | nn.zone 9 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_authoritative/files/cznic-labs-knot-dns.list: -------------------------------------------------------------------------------- 
1 | deb [signed-by=/usr/share/keyrings/cznic-labs-pkg.gpg] https://pkg.labs.nic.cz/knot-dns bookworm main 2 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_authoritative/files/knot.apt.preferences.txt: -------------------------------------------------------------------------------- 1 | Package: knot knot-* libdnssec* libzscanner* libknot* python3-libknot* 2 | Pin-Priority: 1001 3 | Pin: version 3.4.6 4 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/templates/netplan_dummy0.yaml.j2: -------------------------------------------------------------------------------- 1 | network: 2 | version: 2 3 | renderer: networkd 4 | ethernets: 5 | lo: 6 | dhcp4: no 7 | dhcp6: no 8 | addresses: 9 | - {{ INTERNAL_LISTEN_IP }}/32 -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/templates/netplan_dummy1.yaml.j2: -------------------------------------------------------------------------------- 1 | network: 2 | version: 2 3 | renderer: networkd 4 | ethernets: 5 | lo: 6 | dhcp4: no 7 | dhcp6: no 8 | addresses: 9 | - {{ EXTERNAL_LISTEN_IP }}/32 -------------------------------------------------------------------------------- /infra/ansible/roles/support_account/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Add the user 'support' 2 | ansible.builtin.user: 3 | name: support 4 | uid: 1001 5 | password: "{{ LOCAL_PASSWORD | password_hash('sha512') }}" 6 | groups: sudo 7 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/templates/netplan_dummy2.yaml.j2: -------------------------------------------------------------------------------- 1 | network: 2 | version: 2 3 | renderer: networkd 4 | ethernets: 5 | lo: 6 | dhcp4: no 7 | dhcp6: no 8 | addresses: 9 | - {{ bird_router_id }}/32 10 | 
-------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/templates/netplan_dummy3.yaml.j2: -------------------------------------------------------------------------------- 1 | network: 2 | version: 2 3 | renderer: networkd 4 | ethernets: 5 | lo: 6 | dhcp4: no 7 | dhcp6: no 8 | addresses: 9 | - {{ EXTERNAL_OUTGOING_IP }}/32 10 | -------------------------------------------------------------------------------- /sld/vars.tf: -------------------------------------------------------------------------------- 1 | variable "name_dot_com_user" { 2 | type = string 3 | description = "username" 4 | sensitive = true 5 | } 6 | 7 | variable "name_dot_com_token" { 8 | type = string 9 | description = "token" 10 | sensitive = true 11 | } 12 | -------------------------------------------------------------------------------- /ha.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 2024120100 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | 5 | ; Authoritative DNS servers 6 | nycmesh-713-dns-auth-3 A 199.170.132.47 7 | 8 | ; ha.mesh.nycmesh.net 9 | @ A 10.70.147.86 10 | -------------------------------------------------------------------------------- /airflow.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
2024120100 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | 5 | ; Authoritative DNS servers 6 | nycmesh-713-dns-auth-3 A 199.170.132.47 7 | 8 | ; airflow.mesh.nycmesh.net 9 | @ A 10.70.90.209 10 | -------------------------------------------------------------------------------- /infra/terraform/mesh_dns_servers/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | ansible = { 4 | source = "ansible/ansible" 5 | version = "1.3.0" 6 | } 7 | proxmox = { 8 | source = "telmate/proxmox" 9 | version = "3.0.2-rc05" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /jenkins.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 2024120100 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | 5 | ; Authoritative DNS servers 6 | nycmesh-713-dns-auth-3 A 199.170.132.47 7 | 8 | ; jenkins.mesh.nycmesh.net 9 | @ A 10.70.90.209 10 | -------------------------------------------------------------------------------- /infra/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | callbacks_enabled = timer, profile_tasks, profile_roles 4 | pipelining = True 5 | 6 | [ssh_connection] 7 | ssh_args = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no -o ControlMaster=auto -o ControlPersist=60s' 8 | -------------------------------------------------------------------------------- /devairflow.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
2024120100 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | 5 | ; Authoritative DNS servers 6 | nycmesh-713-dns-auth-3 A 199.170.132.47 7 | 8 | ; devairflow.mesh.nycmesh.net 9 | @ A 10.70.90.210 10 | -------------------------------------------------------------------------------- /jamesinternalprodtwo.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-10-dns-auth-5 hostmaster.nycmesh.net. 2024120100 1d 2h 4w 1h ) 3 | @ NS nycmesh-10-dns-auth-5 4 | 5 | ; Authoritative DNS servers 6 | nycmesh-10-dns-auth-5 A 23.158.16.23 7 | 8 | ; jamesinternalprodtwo.mesh.nycmesh.net 9 | @ A 10.70.100.63 10 | -------------------------------------------------------------------------------- /infra/ansible/roles/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: datadog.dd 3 | version: 5.8.0 4 | - name: cloud.terraform 5 | version: 3.0.0 6 | - name: ansible.posix 7 | version: 1.6.2 8 | - name: nycmesh.common 9 | source: git+https://github.com/nycmeshnet/nycmesh-ansible.git 10 | type: git 11 | version: main 12 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_recursive/templates/tsig.ini.j2: -------------------------------------------------------------------------------- 1 | # Target DNS server 2 | dns_rfc2136_server = {{ MAIN_AUTH_SERVER_DOH }} 3 | # Target DNS port 4 | dns_rfc2136_port = 53 5 | # TSIG key name 6 | dns_rfc2136_name = doh.mesh.nycmesh.net 7 | # TSIG key secret 8 | dns_rfc2136_secret = {{ TSIG_KEY_DOH }} 9 | # TSIG key algorithm 10 | dns_rfc2136_algorithm = HMAC-SHA512 11 | -------------------------------------------------------------------------------- /prox.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
2024120100 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | 5 | ; Authoritative DNS servers 6 | nycmesh-713-dns-auth-3 A 199.170.132.47 7 | 8 | jon A 10.70.90.52 9 | nycmesh-10-r630-01 A 10.70.103.186 10 | nycmesh-713-r640-01 A 10.70.90.195 11 | nycmesh-11 A 10.70.104.12 12 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ssh/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Disable password auth 2 | ansible.builtin.lineinfile: 3 | dest: /etc/ssh/sshd_config 4 | regexp: '^#?\s*PasswordAuthentication\s' 5 | line: 'PasswordAuthentication no' 6 | state: present 7 | 8 | - name: Restart and enable sshd service 9 | ansible.builtin.service: 10 | name: sshd 11 | state: reloaded 12 | enabled: true 13 | -------------------------------------------------------------------------------- /doh.mesh.nycmesh.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 300 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
#!/usr/bin/env python
"""Emit router A records and per-NN NS delegations for every network number.

Each NN (0..8191) maps into 10.96.0.0/13: 13 bits of NN are packed as
3 high bits -> second octet offset from 96, 8 middle bits -> third octet,
and the low 2 bits -> the /26 block within the fourth octet. The router
lives at the first host of that /26 (network address + 1).
"""


def nodetoiprange(nn):
    """Return the /26 network address for network number ``nn``.

    Returns a 4-tuple of octets (10, 96 + nn>>10, (nn>>2)&255, (nn&3)<<6);
    the caller adds 1 to the last octet for the router address.
    """
    second = 96 + (nn >> 10)        # top 3 bits of the 13-bit NN
    third = (nn >> 2) & 255         # middle 8 bits
    fourth = (nn & 3) << 6          # low 2 bits select the /26
    return (10, second, third, fourth)


for number in range(8192):
    net = nodetoiprange(number)
    # Router is the first usable host of the NN's /26.
    router_ip = f"{net[0]}.{net[1]}.{net[2]}.{net[3] + 1}"
    router_name = f"nycmesh-{number}-rtr"
    print(f"{router_name} A {router_ip}")
    print(f"n{number} NS {router_name}")
dns_ssh 16 | - role: dns_ospf 17 | - role: dns_telegraf 18 | - role: knot_recursive 19 | - role: support_account 20 | -------------------------------------------------------------------------------- /nycmeshconnect.com.zone: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 2024101000 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | @ NS nycmesh-10-dns-auth-5 5 | 6 | ; GitHub Pages - https://github.com/nycmeshnet/connect-redirect 7 | @ A 185.199.108.153 8 | @ A 185.199.109.153 9 | @ A 185.199.110.153 10 | @ A 185.199.111.153 11 | www CNAME nycmeshnet.github.io 12 | 13 | ; Authoritative DNS servers 14 | nycmesh-713-dns-auth-3 A 199.170.132.47 15 | nycmesh-10-dns-auth-5 A 23.158.16.23 16 | -------------------------------------------------------------------------------- /nycmeshconnect.net.zone: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 2024101000 1d 2h 4w 1h ) 3 | @ NS nycmesh-713-dns-auth-3 4 | @ NS nycmesh-10-dns-auth-5 5 | 6 | ; GitHub Pages - https://github.com/nycmeshnet/connect/blob/main/CNAME 7 | @ A 185.199.108.153 8 | @ A 185.199.109.153 9 | @ A 185.199.110.153 10 | @ A 185.199.111.153 11 | www CNAME nycmeshnet.github.io 12 | 13 | ; Authoritative DNS servers 14 | nycmesh-713-dns-auth-3 A 199.170.132.47 15 | nycmesh-10-dns-auth-5 A 23.158.16.23 16 | -------------------------------------------------------------------------------- /themesh.nyc.zone: -------------------------------------------------------------------------------- 1 | $TTL 3600 2 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
#!/bin/bash

# Deploy zone files to Knot when the repo has new commits.
# Based on https://github.com/nycmeshnet/nycmesh-dns/blob/master/deploy.sh
# Modified for IaC managed servers

# Abort on any command failure or unset variable so a failed `cd` or
# `git pull` cannot cause later steps to run in the wrong directory.
set -euo pipefail

cd /root/nycmesh-dns
git pull

NEWCOMMIT=$(git rev-parse HEAD)
# The commit marker may not exist on first run; default to empty so we
# still deploy instead of aborting under `set -e`.
OLDCOMMIT=$(cat /var/lib/knot/commit 2>/dev/null || true)

if [ "$NEWCOMMIT" == "$OLDCOMMIT" ]
then
    # Already deployed this commit; nothing to do.
    exit 0
fi

# Regenerate derived zone files before copying everything into place.
python3 generate_nn.py
python3 makereverse.py
cp -f ./*.zone /var/lib/knot/zones
chown knot:knot /var/lib/knot/zones/*.zone
chmod 664 /var/lib/knot/zones/*.zone

systemctl restart knot
sleep 2

# Record the deployed commit only after a successful restart, so a failed
# deploy is retried on the next run.
git rev-parse HEAD > /var/lib/knot/commit
-------------------------------------------------------------------------------- /infra/ansible/roles/dns_telegraf/tasks/telegraf_knot.yaml: -------------------------------------------------------------------------------- 1 | - name: Download telegraf knot monitoring script 2 | ansible.builtin.get_url: 3 | url: https://raw.githubusercontent.com/x70b1/telegraf-knot/master/telegraf-knot.sh 4 | dest: /usr/bin/telegraf-knot.sh 5 | checksum: sha256:1d2049100902a7e8ef3aa05a0ab25f5c1841a80bdcca0c2165057c4755ff11d1 6 | mode: "755" 7 | 8 | - name: Sudoer entry for telegraf knot 9 | ansible.builtin.lineinfile: 10 | path: /etc/sudoers.d/telegraf-knot 11 | create: true 12 | line: "telegraf ALL=(ALL) NOPASSWD: /usr/sbin/knotc -f -s /run/knot/knot.sock stats" 13 | mode: "600" 14 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/templates/netplan_50_cloud_init.yaml.j2: -------------------------------------------------------------------------------- 1 | network: 2 | version: 2 3 | ethernets: 4 | eth0: 5 | dhcp4: false 6 | dhcp6: false 7 | addresses: 8 | - {{ INTERNAL_MGT_IP }}/{{ INTERNAL_NETWORK_HOST_IDENTIFIER }} 9 | gateway4: {{ INTERNAL_MGT_DG }} 10 | match: 11 | macaddress: {{ hostvars[inventory_hostname].ansible_default_ipv4.macaddress }} 12 | nameservers: 13 | addresses: 14 | - 10.10.10.10 15 | - 1.1.1.1 16 | search: [] 17 | set-name: eth0 18 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_telegraf/tasks/datadog.yaml: -------------------------------------------------------------------------------- 1 | - name: Import the Datadog Agent role from the Datadog collection 2 | ansible.builtin.import_role: 3 | name: datadog.dd.agent 4 | vars: 5 | datadog_api_key: "{{ DATADOG_API_KEY }}" 6 | datadog_site: "{{ DATADOG_SITE }}" 7 | datadog_config: 8 | logs_enabled: true 9 | datadog_additional_groups: "systemd-journal" 10 | datadog_checks: 11 | journald: 12 | logs: 
13 | - type: journald 14 | path: /var/log/journal/ 15 | 16 | - name: Reload datadog 17 | ansible.builtin.systemd_service: 18 | name: datadog-agent 19 | state: restarted 20 | enabled: true 21 | daemon_reload: true 22 | -------------------------------------------------------------------------------- /infra/terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | key = "terraform/state/dns4.tfstate" 4 | region = "us-east-1" 5 | } 6 | required_providers { 7 | ansible = { 8 | source = "ansible/ansible" 9 | version = "1.3.0" 10 | } 11 | proxmox = { 12 | source = "telmate/proxmox" 13 | version = "3.0.2-rc05" 14 | } 15 | } 16 | } 17 | 18 | provider "proxmox" { 19 | # Configuration options 20 | pm_api_url = "https://${var.proxmox_host}:8006/api2/json" 21 | # TODO: Setup cert 22 | pm_tls_insecure = true 23 | pm_debug = true 24 | pm_api_token_id = var.proxmox_token_id 25 | pm_api_token_secret = var.proxmox_token_secret 26 | } 27 | -------------------------------------------------------------------------------- /generate_nn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | header = """$TTL 300 4 | @ SOA ( nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
def get_nn_ip(nn_s):
    """Map a network-number string to its IP in 10.69.0.0/16.

    The NN's hundreds become the third octet and the remainder the fourth,
    e.g. "1234" -> "10.69.12.34" and "99" -> "10.69.0.99". Replaces the
    original string-slicing logic (which returned an int 0 for one octet
    but strings for the rest, and mishandled leading zeros) with a single
    divmod — identical output for all inputs produced by str(int).

    Args:
        nn_s: network number as a decimal string, e.g. "713".

    Returns:
        Dotted-quad address string "10.69.<nn // 100>.<nn % 100>".
    """
    third_octet, fourth_octet = divmod(int(nn_s), 100)
    return f"10.69.{third_octet}.{fourth_octet}"
-f "$priv_key_path" ]; then 16 | exit 1 17 | else 18 | cat $full_chain_path > /etc/knot-resolver/server-cert.pem 19 | cat $priv_key_path > /etc/knot-resolver/server-key.pem 20 | 21 | proc_count="$(grep processor /proc/cpuinfo | wc -l)" 22 | for ((i = 1; i < $proc_count; ++i)); do 23 | systemctl restart kresd@$i.service 24 | sleep 5 25 | done 26 | fi 27 | -------------------------------------------------------------------------------- /infra/terraform/dev_jon.tfvars: -------------------------------------------------------------------------------- 1 | proxmox_node = "jon" 2 | proxmox_storage_location = "local-lvm" 3 | dns_auth_mgt_ip = [ 4 | "10.70.90.134", 5 | ] 6 | dns_rec_mgt_ip = [ 7 | "10.70.90.135", 8 | ] 9 | dns_auth_router_ip = [ 10 | "10.70.90.183", 11 | ] 12 | dns_rec_router_ip = [ 13 | "10.70.90.184", 14 | ] 15 | dns_auth_internal_ip = [ 16 | "10.70.90.136", 17 | ] 18 | dns_rec_internal_ip = [ 19 | "10.70.90.133", 20 | ] 21 | dns_auth_external_ip = [ 22 | "199.170.132.48", 23 | ] 24 | dns_rec_external_ip = [ 25 | "", # Blank so it is not created 26 | ] 27 | dns_rec_outgoing_ip = [ 28 | "199.170.132.41", # Blank so it is not created 29 | ] 30 | dns_mgt_network_host_identifier = "24" 31 | dns_mgt_gateway = "10.70.90.1" 32 | hostname_prefix = "nycmesh-713-jon" 33 | hostname_count_offset = 0 34 | recursive_cores = 4 35 | recursive_sockets = 1 36 | recursive_memory = 4096 37 | enable_doh = "" 38 | mesh_stub_resolver = "23.158.16.23" 39 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | NYC Mesh DNS 2 | --- 3 | 4 | This repository manages the DNS zones for the various NYC Mesh domains including `nycmesh.net` and `mesh.nycmesh.net` domains. 5 | 6 | # mesh.nycmesh.net 7 | 8 | Edit the [mesh.zone](./mesh.zone) file to add a record, please format appropriately and place under the proper heading. 
9 | 10 | Please fork and make a pull request, don't push directly. 11 | 12 | # Second Level Domains - nycmesh.net 13 | 14 | Uses [lexfrei/namedotcom](https://registry.terraform.io/providers/lexfrei/namedotcom/latest/docs) to manage the DNS zones for the following domains. 15 | 16 | 1. [nycmesh.net](./sld/records.nycmesh.net.tf) 17 | 2. [nycmeshconnect.com](./sld/records.nycmeshconnect.com.tf) 18 | 3. [nycmeshconnect.net](./sld/records.nycmeshconnect.net.tf) 19 | 4. [themesh.nyc](./sld/records.themesh.nyc.tf) 20 | 21 | # Hosting 22 | 23 | The zones hosted inside the mesh are created and maintained via the terraform and ansible (IaC) found under [infra/](./infra/) and deployed via GitHub Actions as defined in [.github/workflows/deploy.yaml](./.github/workflows/deploy.yaml). 24 | -------------------------------------------------------------------------------- /sld/README.md: -------------------------------------------------------------------------------- 1 | # NYCMesh Second Level Domains 2 | 3 | 1. [nycmesh.net](./records.nycmesh.net.tf) 4 | 2. [nycmeshconnect.com](./records.nycmeshconnect.com.tf) 5 | 3. [nycmeshconnect.net](./records.nycmeshconnect.net.tf) 6 | 4. [themesh.nyc](./records.themesh.nyc.tf) 7 | 8 | ## Add DNS Record(s) 9 | 10 | 1. Fork the repository if needed. 11 | 2. Create a new branch. 12 | 3. Add a new entry to the corresponding file `records..tf`. Consult the [lexfrei/namedotcom documentation](https://registry.terraform.io/providers/lexfrei/namedotcom/latest/docs) as needed. The example below creates an `A` record pointed at `1.1.1.2`. 13 | 4. Open a [pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) to `main`. 
14 | 15 | ``` 16 | resource "namedotcom_record" "record_test" { 17 | domain_name = "nycmesh.net" 18 | host = "test" 19 | record_type = "A" 20 | answer = "1.1.1.2" 21 | } 22 | ``` 23 | 24 | ## Setup 25 | 26 | To use this repository as a template for managing other existing DNS zones hosted by [name.com](https://name.com), see [setup/README.md](./setup/README.md). 27 | -------------------------------------------------------------------------------- /infra/terraform/prod_sn11.tfvars: -------------------------------------------------------------------------------- 1 | proxmox_node = "nycmesh-sn11-prodvms1" 2 | proxmox_storage_location = "local-lvm" 3 | vm_nic = "vmbr0" 4 | dns_auth_mgt_ip = [ 5 | ] 6 | dns_rec_mgt_ip = [ 7 | "10.70.104.22", 8 | "10.70.104.24", 9 | ] 10 | dns_auth_router_ip = [ 11 | ] 12 | dns_rec_router_ip = [ 13 | "10.70.104.23", 14 | "10.70.104.25", 15 | ] 16 | dns_auth_internal_ip = [ 17 | ] 18 | dns_rec_internal_ip = [ 19 | //"10.10.10.10", 20 | //"10.10.10.11", 21 | "10.70.104.26", 22 | "10.70.104.27", 23 | ] 24 | dns_auth_external_ip = [ 25 | ] 26 | dns_rec_external_ip = [ 27 | "", # Blank so it is not created 28 | "", # Blank so it is not created 29 | ] 30 | dns_rec_outgoing_ip = [ 31 | "", # Blank so it is not created 32 | "", # Blank so it is not created 33 | ] 34 | dns_mgt_network_host_identifier = "24" 35 | dns_mgt_gateway = "10.70.104.1" 36 | hostname_prefix = "nycmesh-11" 37 | hostname_count_offset = 6 38 | recursive_cores = 5 39 | recursive_sockets = 1 40 | recursive_memory = 4096 41 | enable_doh = "" 42 | mesh_stub_resolver = "199.170.132.47" 43 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - infra/** 9 | workflow_dispatch: 10 | 11 | permissions: read-all 12 | 13 | jobs: 14 | #deploy_jon: 15 | # name: Deploy to 
jon 16 | # uses: ./.github/workflows/deploy_dns_environment.yaml 17 | # with: 18 | # environment: dev_jon 19 | # secrets: inherit 20 | # if: github.ref == 'refs/heads/main' 21 | 22 | deploy_sn11_prod: 23 | name: Deploy to sn11 prod 24 | uses: ./.github/workflows/deploy_dns_environment.yaml 25 | #needs: deploy_jon 26 | with: 27 | environment: prod_sn11 28 | secrets: inherit 29 | if: github.ref == 'refs/heads/main' 30 | 31 | deploy_sn10_prod: 32 | name: Deploy to sn10 prod 33 | uses: ./.github/workflows/deploy_dns_environment.yaml 34 | needs: deploy_sn11_prod 35 | with: 36 | environment: prod_sn10 37 | secrets: inherit 38 | if: github.ref == 'refs/heads/main' 39 | 40 | deploy_sn3_prod: 41 | name: Deploy to sn3 prod 42 | uses: ./.github/workflows/deploy_dns_environment.yaml 43 | needs: deploy_sn10_prod 44 | with: 45 | environment: prod_sn3 46 | secrets: inherit 47 | if: github.ref == 'refs/heads/main' 48 | -------------------------------------------------------------------------------- /infra/terraform/prod_sn10.tfvars: -------------------------------------------------------------------------------- 1 | proxmox_node = "nycmesh-10-r630-01" 2 | proxmox_storage_location = "local-lvm" 3 | vm_nic = "vmbr1" 4 | dns_auth_mgt_ip = [ 5 | "10.70.100.43", 6 | "10.70.100.52", 7 | ] 8 | dns_rec_mgt_ip = [ 9 | "10.70.100.44", 10 | "10.70.100.53", 11 | ] 12 | dns_auth_router_ip = [ 13 | "10.70.100.54", 14 | "10.70.100.55", 15 | ] 16 | dns_rec_router_ip = [ 17 | "10.70.100.56", 18 | "10.70.100.57", 19 | ] 20 | dns_auth_internal_ip = [ 21 | "10.70.100.45", 22 | "10.70.100.45", 23 | ] 24 | dns_rec_internal_ip = [ 25 | "10.10.10.10", 26 | "10.10.10.11", 27 | ] 28 | dns_auth_external_ip = [ 29 | "23.158.16.23", 30 | "199.170.132.47", 31 | ] 32 | dns_rec_external_ip = [ 33 | "", # Blank so it is not created 34 | "", # Blank so it is not created 35 | ] 36 | dns_rec_outgoing_ip = [ 37 | "23.158.16.25", 38 | "23.158.16.26", 39 | ] 40 | dns_mgt_network_host_identifier = "24" 41 | 
dns_mgt_gateway = "10.70.100.1" 42 | hostname_prefix = "nycmesh-10" 43 | hostname_count_offset = 4 44 | recursive_cores = 5 45 | recursive_sockets = 1 46 | recursive_memory = 4096 47 | enable_doh = "enable" 48 | mesh_stub_resolver = "199.170.132.47" 49 | -------------------------------------------------------------------------------- /infra/terraform/prod_sn3.tfvars: -------------------------------------------------------------------------------- 1 | proxmox_node = "nycmesh-713-r640-02" 2 | proxmox_storage_location = "local-lvm" 3 | vm_nic = "vmbr0v32" 4 | dns_auth_mgt_ip = [ 5 | "10.70.90.148", 6 | "10.70.90.181", 7 | ] 8 | dns_rec_mgt_ip = [ 9 | "10.70.90.129", 10 | "10.70.90.182", 11 | ] 12 | dns_auth_router_ip = [ 13 | "10.70.90.185", 14 | "10.70.90.187", 15 | ] 16 | dns_rec_router_ip = [ 17 | "10.70.90.192", 18 | "10.70.90.194", 19 | ] 20 | dns_auth_internal_ip = [ 21 | "10.70.90.132", 22 | "10.70.90.132", 23 | ] 24 | dns_rec_internal_ip = [ 25 | "10.10.10.10", 26 | "10.10.10.11", 27 | ] 28 | dns_auth_external_ip = [ 29 | "199.170.132.47", 30 | "23.158.16.23", 31 | ] 32 | dns_rec_external_ip = [ 33 | "", # Blank so it is not created 34 | "", # Blank so it is not created 35 | ] 36 | dns_rec_outgoing_ip = [ 37 | "199.170.132.109", 38 | "199.170.132.110", 39 | ] 40 | dns_mgt_network_host_identifier = "24" 41 | dns_mgt_gateway = "10.70.90.1" 42 | hostname_prefix = "nycmesh-713" 43 | hostname_count_offset = 2 44 | recursive_cores = 5 45 | recursive_sockets = 1 46 | recursive_memory = 4096 47 | enable_doh = "enable" 48 | mesh_stub_resolver = "23.158.16.23" 49 | -------------------------------------------------------------------------------- /.github/workflows/pull_request.yaml: -------------------------------------------------------------------------------- 1 | 2 | name: "Pull Request" 3 | 4 | on: 5 | pull_request: 6 | 7 | permissions: read-all 8 | 9 | jobs: 10 | validations: 11 | name: "Validate" 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | 
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 16 | 17 | - name: Setup Terraform with specified version on the runner 18 | uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # @v3 19 | with: 20 | terraform_version: 1.8.3 21 | 22 | - name: Terraform format 23 | run: terraform fmt -check 24 | working-directory: sld 25 | 26 | - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #@v5 27 | with: 28 | python-version: '3.11' 29 | 30 | - name: Setup libraries 31 | run: pip install --require-hashes -r requirements.txt 32 | 33 | - name: Setup kzonecheck 34 | run: sudo apt-get update && sudo apt-get install -y knot-dnssecutils 35 | 36 | - name: Run kzonecheck on each zone file 37 | run: find . -type f -name "*.zone" -print0 | xargs -0L1 kzonecheck -v 38 | 39 | - name: Run ansible-lint 40 | uses: ansible/ansible-lint@8861a737a10e4bea5f5875d55d2f664908e7425c # v25 41 | with: 42 | args: "" 43 | setup_python: "true" 44 | working_directory: "./infra/ansible/" 45 | requirements_file: "" 46 | -------------------------------------------------------------------------------- /sld/records.themesh.nyc.tf: -------------------------------------------------------------------------------- 1 | # https://github.com/nycmeshnet/themesh.nyc/blob/main/CNAME 2 | 3 | resource "namedotcom_record" "themesh_A_108" { 4 | domain_name = "themesh.nyc" 5 | host = "" 6 | record_type = "A" 7 | answer = "185.199.108.153" 8 | } 9 | 10 | resource "namedotcom_record" "themesh_A_109" { 11 | domain_name = "themesh.nyc" 12 | host = "" 13 | record_type = "A" 14 | answer = "185.199.109.153" 15 | } 16 | 17 | resource "namedotcom_record" "themesh_A_110" { 18 | domain_name = "themesh.nyc" 19 | host = "" 20 | record_type = "A" 21 | answer = "185.199.110.153" 22 | } 23 | 24 | resource "namedotcom_record" "themesh_A_111" { 25 | domain_name = "themesh.nyc" 26 | host = "" 27 | record_type = "A" 28 | answer = "185.199.111.153" 29 | } 30 | 31 | resource 
"namedotcom_record" "themesh_nyc_www_cname" { 32 | domain_name = "themesh.nyc" 33 | host = "www" 34 | record_type = "CNAME" 35 | answer = "nycmeshnet.github.io" 36 | } 37 | 38 | # Authoritative DNS server at SN3 39 | resource "namedotcom_record" "nycmesh-713-dns-auth-3-themesh-nyc" { 40 | domain_name = "themesh.nyc" 41 | host = "nycmesh-713-dns-auth-3" 42 | record_type = "A" 43 | answer = "199.170.132.47" 44 | } 45 | 46 | # Authoritative DNS server at SN10 47 | resource "namedotcom_record" "nycmesh-713-dns-auth-5-themesh-nyc" { 48 | domain_name = "themesh.nyc" 49 | host = "nycmesh-10-dns-auth-5" 50 | record_type = "A" 51 | answer = "23.158.16.23" 52 | } 53 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | schedule: 9 | - cron: '25 4 * * 0' 10 | 11 | permissions: read-all 12 | 13 | jobs: 14 | analyze: 15 | name: Analyze (${{ matrix.language }}) 16 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 17 | permissions: 18 | # required for all workflows 19 | security-events: write 20 | 21 | # required to fetch internal or private CodeQL packs 22 | packages: read 23 | 24 | # only required for workflows in private repositories 25 | actions: read 26 | contents: read 27 | 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | include: 32 | - language: actions 33 | build-mode: none 34 | - language: python 35 | build-mode: none 36 | steps: 37 | - name: Checkout repository 38 | uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 39 | 40 | # Initializes the CodeQL tools for scanning. 
41 | - name: Initialize CodeQL 42 | uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 43 | with: 44 | languages: ${{ matrix.language }} 45 | build-mode: ${{ matrix.build-mode }} 46 | 47 | - name: Perform CodeQL Analysis 48 | uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 49 | with: 50 | category: "/language:${{matrix.language}}" 51 | -------------------------------------------------------------------------------- /sld/records.nycmeshconnect.com.tf: -------------------------------------------------------------------------------- 1 | # Pointed to github pages 2 | # https://github.com/nycmeshnet/connect-redirect 3 | 4 | resource "namedotcom_record" "A_108" { 5 | domain_name = "nycmeshconnect.com" 6 | host = "" 7 | record_type = "A" 8 | answer = "185.199.108.153" 9 | } 10 | 11 | resource "namedotcom_record" "A_109" { 12 | domain_name = "nycmeshconnect.com" 13 | host = "" 14 | record_type = "A" 15 | answer = "185.199.109.153" 16 | } 17 | 18 | resource "namedotcom_record" "A_110" { 19 | domain_name = "nycmeshconnect.com" 20 | host = "" 21 | record_type = "A" 22 | answer = "185.199.110.153" 23 | } 24 | 25 | resource "namedotcom_record" "A_111" { 26 | domain_name = "nycmeshconnect.com" 27 | host = "" 28 | record_type = "A" 29 | answer = "185.199.111.153" 30 | } 31 | 32 | resource "namedotcom_record" "nycmeshconnect_com_www_cname" { 33 | domain_name = "nycmeshconnect.com" 34 | host = "www" 35 | record_type = "CNAME" 36 | answer = "nycmeshnet.github.io" 37 | } 38 | 39 | # Authoritative DNS server at SN3 40 | resource "namedotcom_record" "nycmesh-713-dns-auth-3-nycmeshconnect-com" { 41 | domain_name = "nycmeshconnect.com" 42 | host = "nycmesh-713-dns-auth-3" 43 | record_type = "A" 44 | answer = "199.170.132.47" 45 | } 46 | 47 | # Authoritative DNS server at SN10 48 | resource "namedotcom_record" "nycmesh-10-dns-auth-5-nycmeshconnect-com" { 49 | domain_name = "nycmeshconnect.com" 50 | host = "nycmesh-10-dns-auth-5" 
51 | record_type = "A" 52 | answer = "23.158.16.23" 53 | } 54 | -------------------------------------------------------------------------------- /sld/records.nycmeshconnect.net.tf: -------------------------------------------------------------------------------- 1 | # Pointed to github pages 2 | # https://github.com/nycmeshnet/connect/blob/main/CNAME 3 | 4 | resource "namedotcom_record" "record__240356243" { 5 | domain_name = "nycmeshconnect.net" 6 | host = "" 7 | record_type = "A" 8 | answer = "185.199.108.153" 9 | } 10 | 11 | resource "namedotcom_record" "record__240356247" { 12 | domain_name = "nycmeshconnect.net" 13 | host = "" 14 | record_type = "A" 15 | answer = "185.199.109.153" 16 | } 17 | 18 | resource "namedotcom_record" "record__240356249" { 19 | domain_name = "nycmeshconnect.net" 20 | host = "" 21 | record_type = "A" 22 | answer = "185.199.110.153" 23 | } 24 | 25 | resource "namedotcom_record" "record__240356250" { 26 | domain_name = "nycmeshconnect.net" 27 | host = "" 28 | record_type = "A" 29 | answer = "185.199.111.153" 30 | } 31 | 32 | resource "namedotcom_record" "nycmeshconnect_net_www_cname" { 33 | domain_name = "nycmeshconnect.net" 34 | host = "www" 35 | record_type = "CNAME" 36 | answer = "nycmeshnet.github.io" 37 | } 38 | 39 | # Authoritative DNS server at SN3 40 | resource "namedotcom_record" "nycmesh-713-dns-auth-3-nycmeshconnect-net" { 41 | domain_name = "nycmeshconnect.net" 42 | host = "nycmesh-713-dns-auth-3" 43 | record_type = "A" 44 | answer = "199.170.132.47" 45 | } 46 | 47 | # Authoritative DNS server at SN10 48 | resource "namedotcom_record" "nycmesh-10-dns-auth-5-nycmeshconnect-net" { 49 | domain_name = "nycmeshconnect.net" 50 | host = "nycmesh-10-dns-auth-5" 51 | record_type = "A" 52 | answer = "23.158.16.23" 53 | } 54 | -------------------------------------------------------------------------------- /infra/terraform/mesh_dns_servers/vm_recursive.tf: -------------------------------------------------------------------------------- 1 
| resource "proxmox_vm_qemu" "recursive_dns_vm" { 2 | count = length(var.dns_rec_mgt_ip) 3 | name = "${var.hostname_prefix}-dns-rec-${sum([1, count.index, var.hostname_count_offset])}" 4 | desc = "Recursive knot ${count.index}" 5 | target_node = var.proxmox_node 6 | 7 | clone = var.proxmox_template_image 8 | 9 | cpu { 10 | cores = var.recursive_cores 11 | sockets = var.recursive_sockets 12 | type = "host" 13 | } 14 | 15 | memory = var.recursive_memory 16 | os_type = "cloud-init" 17 | agent = 1 18 | ciuser = var.dns_local_user 19 | cipassword = var.mesh_dns_local_password 20 | 21 | scsihw = "virtio-scsi-pci" 22 | 23 | disks { 24 | scsi { 25 | scsi0 { 26 | disk { 27 | backup = false 28 | size = 15 29 | storage = var.proxmox_storage_location 30 | } 31 | } 32 | } 33 | ide { 34 | ide3 { 35 | cloudinit { 36 | storage = var.proxmox_storage_location 37 | } 38 | } 39 | } 40 | } 41 | 42 | network { 43 | id = 0 44 | bridge = var.vm_nic 45 | model = "virtio" 46 | } 47 | 48 | ipconfig0 = "ip=${var.dns_rec_mgt_ip[count.index]}/${var.dns_mgt_network_host_identifier},gw=${var.dns_mgt_gateway}" 49 | 50 | ssh_user = "root" 51 | ssh_private_key = file("${path.module}/../${var.dns_ssh_key_name}") 52 | 53 | sshkeys = file("${path.module}/../${var.dns_ssh_key_name}.pub") 54 | 55 | serial { 56 | id = 0 57 | type = "socket" 58 | } 59 | 60 | tags = "dns,managed_by_iac" 61 | 62 | lifecycle { 63 | ignore_changes = [ 64 | qemu_os, 65 | ] 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /makereverse.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import re 4 | 5 | domain = None 6 | validreverse = ['10','59.167.199'] 7 | files = ['mesh.zone'] 8 | records = [] 9 | output = { r: [] for r in validreverse } 10 | 11 | header = """$ORIGIN {} 12 | $TTL 3600 13 | @ SOA nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
( 2024120100 1d 2h 4w 1h ) 14 | @ NS nycmesh-713-dns-auth-3 15 | @ NS nycmesh-10-dns-auth-5 16 | @ A 10.10.10.11 17 | nycmesh-10-dns-auth-5 A 23.158.16.23 18 | nycmesh-713-dns-auth-3 A 199.170.132.47 19 | """ 20 | 21 | def inaddrarpa(x): 22 | return '{}.in-addr.arpa.'.format(x) 23 | 24 | def swapip(a): 25 | b = a.split('.') 26 | return '.'.join([b[3],b[2],b[1],b[0]]) 27 | 28 | def whichreversedomain(i): 29 | for r in validreverse: 30 | if re.search('.*\.' + r, i): 31 | return r 32 | return None 33 | 34 | def chopname(n,z): 35 | return n[0:(0-len(z)-1)] 36 | 37 | # Generate the PTRs for all A records 38 | for f in files: 39 | with open('mesh.zone') as f: 40 | for l in f: 41 | j = l.strip().split() 42 | if len(j) == 3 and j[0] == ';' and j[1] == "$ORIGIN": 43 | domain = j[2] 44 | if len(j) == 3 and j[1] == "A": 45 | a = swapip(j[2]) 46 | b = 'PTR' 47 | c = j[0] + '.' + domain 48 | wr = whichreversedomain(a) 49 | if wr is not None: 50 | a = chopname(a,wr) 51 | if wr == '59.167.199': 52 | c = c + 'nycmesh.net.' 
53 | if not [ x for x in output[wr] if x[0] == a ]: 54 | output[wr].append([a,b,c]) 55 | 56 | # Output each record to the appropriate file 57 | for k in output: 58 | fo = open(inaddrarpa(k) + 'zone', 'w') 59 | fo.write(header.format(inaddrarpa(k))) 60 | for i in output[k]: 61 | fo.write(' '.join(i) + '\n') 62 | fo.close() 63 | 64 | 65 | -------------------------------------------------------------------------------- /infra/terraform/mesh_dns_servers/vm_authoritative.tf: -------------------------------------------------------------------------------- 1 | resource "proxmox_vm_qemu" "authoritative_dns_vm" { 2 | count = length(var.dns_auth_mgt_ip) 3 | name = "${var.hostname_prefix}-dns-auth-${sum([1, count.index, var.hostname_count_offset])}" 4 | desc = "Authoritative knot ${count.index}" 5 | target_node = var.proxmox_node 6 | 7 | clone = var.proxmox_template_image 8 | 9 | cpu { 10 | cores = var.recursive_cores 11 | sockets = var.recursive_sockets 12 | type = "host" 13 | } 14 | 15 | memory = var.authoritative_memory 16 | os_type = "cloud-init" 17 | agent = 1 18 | ciuser = var.dns_local_user 19 | cipassword = var.mesh_dns_local_password 20 | 21 | scsihw = "virtio-scsi-pci" 22 | 23 | disks { 24 | scsi { 25 | scsi0 { 26 | disk { 27 | backup = false 28 | size = 15 29 | storage = var.proxmox_storage_location 30 | } 31 | } 32 | } 33 | ide { 34 | ide3 { 35 | cloudinit { 36 | storage = var.proxmox_storage_location 37 | } 38 | } 39 | } 40 | } 41 | 42 | network { 43 | id = 0 44 | bridge = var.vm_nic 45 | model = "virtio" 46 | } 47 | 48 | ipconfig0 = "ip=${var.dns_auth_mgt_ip[count.index]}/${var.dns_mgt_network_host_identifier},gw=${var.dns_mgt_gateway}" 49 | 50 | ssh_user = "root" 51 | ssh_private_key = file("${path.module}/../${var.dns_ssh_key_name}") 52 | 53 | sshkeys = file("${path.module}/../${var.dns_ssh_key_name}.pub") 54 | 55 | serial { 56 | id = 0 57 | type = "socket" 58 | } 59 | 60 | tags = "dns,managed_by_iac" 61 | 62 | lifecycle { 63 | ignore_changes = [ 64 | qemu_os, 
65 | ] 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_recursive/tasks/doh.yaml: -------------------------------------------------------------------------------- 1 | - name: Setup doh 2 | when: DOH_SERVER != "" 3 | block: 4 | - name: Install deps 5 | ansible.builtin.apt: 6 | lock_timeout: 120 7 | update_cache: true 8 | pkg: 9 | - python3-pip 10 | - python3.11-venv 11 | - cron 12 | 13 | - name: Install certbot 14 | ansible.builtin.pip: 15 | name: 16 | - certbot==4.0.0 17 | - certbot-dns-rfc2136==4.0.0 18 | virtualenv: /root/certbot_venv 19 | virtualenv_command: python3 -m venv 20 | 21 | - name: Certbot script 22 | ansible.builtin.template: 23 | src: certbot.sh.j2 24 | dest: /root/certbot.sh 25 | mode: "700" 26 | 27 | - name: Create tsig.ini 28 | ansible.builtin.template: 29 | src: tsig.ini.j2 30 | dest: /root/tsig.ini 31 | mode: "700" 32 | 33 | - name: Get cert 34 | ansible.builtin.command: 35 | cmd: /root/certbot.sh 36 | creates: /etc/knot-resolver/server-cert.pem 37 | 38 | - name: Crontab 39 | ansible.builtin.template: 40 | src: certbot_update_cert.j2 41 | dest: /etc/cron.d/certbot_update_cert 42 | mode: "700" 43 | 44 | - name: Restart and enable cron service 45 | ansible.builtin.service: 46 | name: cron 47 | state: restarted 48 | enabled: true 49 | 50 | - name: Setup doh 51 | when: DOH_SERVER == "" 52 | block: 53 | - name: Cleanup crontab 54 | ansible.builtin.file: 55 | path: /etc/cron.d/certbot_update_cert 56 | state: absent 57 | 58 | - name: Cleanup tsig 59 | ansible.builtin.file: 60 | path: /root/tsig.ini 61 | state: absent 62 | 63 | - name: Cleanup certbot 64 | ansible.builtin.file: 65 | path: /root/certbot.sh 66 | state: absent 67 | -------------------------------------------------------------------------------- /.github/workflows/sld_terraform.yaml: -------------------------------------------------------------------------------- 1 | 2 | name: "SLD Terraform" 3 | 4 | on: 5 | push: 
6 | branches: 7 | - main 8 | workflow_dispatch: 9 | branches: 10 | - main 11 | 12 | permissions: read-all 13 | 14 | defaults: 15 | run: 16 | working-directory: sld 17 | 18 | env: 19 | # Credentials for name dot com 20 | TF_VAR_name_dot_com_user: ${{ secrets.TF_VAR_name_dot_com_user }} 21 | TF_VAR_name_dot_com_token: ${{ secrets.TF_VAR_name_dot_com_token }} 22 | # Credentials for deployment to AWS 23 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 24 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 25 | # S3 bucket for the Terraform state 26 | BUCKET_TF_STATE: ${{ secrets.BUCKET_TF_STATE}} 27 | 28 | jobs: 29 | terraform: 30 | name: "Terraform" 31 | environment: prod 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Checkout 35 | uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 36 | 37 | - name: Setup Terraform with specified version on the runner 38 | uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # @v3 39 | with: 40 | terraform_version: 1.8.3 41 | 42 | - name: Terraform init 43 | id: init 44 | run: terraform init -backend-config="bucket=$BUCKET_TF_STATE" 45 | 46 | - name: Terraform format 47 | id: fmt 48 | run: terraform fmt -check 49 | 50 | - name: Terraform validate 51 | id: validate 52 | run: terraform validate 53 | 54 | - name: Terraform plan 55 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 56 | run: terraform plan -no-color -input=false -parallelism=1 57 | 58 | - name: Terraform Apply 59 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 60 | run: terraform apply -auto-approve -input=false -parallelism=1 61 | -------------------------------------------------------------------------------- /sld/setup/one_time_setup.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import os 3 | 4 | DOMAIN = os.environ["TF_VAR_name_dot_com_domain"] 5 | NAME_DOT_COM_USER = os.environ["TF_VAR_name_dot_com_user"] 
6 | NAME_DOT_COM_TOKEN = os.environ["TF_VAR_name_dot_com_token"] 7 | 8 | OUTPUT_TF = f"../records.{DOMAIN}.tf" 9 | OUTPUT_SH = f"import.{DOMAIN}.sh" 10 | 11 | class OneTimeSetup: 12 | def get_records(self, domain_name): 13 | ret = [] 14 | per_page = 1000 15 | url = f"https://api.name.com/v4/domains/{domain_name}/records?perPage={per_page}" 16 | res = requests.get(url, auth=(NAME_DOT_COM_USER, NAME_DOT_COM_TOKEN)) 17 | ret.extend(res.json()["records"]) 18 | while res.json().get("nextPage", None) is not None: 19 | next_page = res.json()["nextPage"] 20 | url = f"https://api.name.com/v4/domains/{domain_name}/records?perPage={per_page}&page={next_page}" 21 | res = requests.get(url, auth=(NAME_DOT_COM_USER, NAME_DOT_COM_TOKEN)) 22 | ret.extend(res.json()["records"]) 23 | return ret 24 | 25 | def one_time_setup(self): 26 | with open(OUTPUT_TF, "w") as fdtf: 27 | with open(OUTPUT_SH, "w") as fdsh: 28 | for item in self.get_records(DOMAIN): 29 | tf, sh = self.generate_resource(item) 30 | fdtf.write(tf) 31 | fdsh.write(sh) 32 | 33 | 34 | def generate_resource(self, row): 35 | print(row) 36 | record_id = row["id"] 37 | host = row.get("host", "") 38 | record_type = row["type"] 39 | answer = row["answer"] 40 | resource_name = f"record_{host}_{record_id}" 41 | tf = f""" 42 | resource "namedotcom_record" "{resource_name}" {{ 43 | domain_name = "{DOMAIN}" 44 | host = "{host}" 45 | record_type = "{record_type}" 46 | answer = "{answer}" 47 | }} 48 | """ 49 | sh = f"terraform import namedotcom_record.{resource_name} {DOMAIN}:{record_id}\n" 50 | return tf, sh 51 | 52 | if __name__ == "__main__": 53 | ots = OneTimeSetup() 54 | ots.one_time_setup() 55 | -------------------------------------------------------------------------------- /sld/setup/README.md: -------------------------------------------------------------------------------- 1 | # SLD One Time Setup 2 | 3 | ## GitHub Repository Setup 4 | 5 | 1. Obtain your [name dot com API key](https://www.name.com/account/settings/api). 
6 | 2. Set the secret `TF_VAR_name_dot_com_user` to your name dot com username. 7 | 3. Set the secret `TF_VAR_name_dot_com_token` to your name dot com token. 8 | 4. Setup a s3 bucket (or similar) based on your needs for state storage. For s3, follow the terraform [instructions](https://developer.hashicorp.com/terraform/language/settings/backends/s3#s3-bucket-permissions). Set the secret `BUCKET_TF_STATE` to the bucket name. 9 | 5. Set the secrets `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. 10 | 11 | ## Initial Setup (Existing Domain) 12 | 13 | Complete the following from your local machine. These steps should work on linux with python3 + pip installed. 14 | 15 | 1. Clone this repository. 16 | 2. Setup a python environment: 17 | ``` 18 | cd setup 19 | python3 -m venv venv 20 | source venv/bin/activate 21 | pip install requests 22 | ``` 23 | 3. Obtain your [name dot com API key](https://www.name.com/account/settings/api). 24 | 4. Obtain your `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` (as above). 25 | 5. Update `main.tf` with your bucket information (`backend` configuration). 26 | 6. Execute the one time setup: 27 | ``` 28 | export TF_VAR_name_dot_com_domain="nycmesh.net" 29 | export TF_VAR_name_dot_com_user="theactualvalue" 30 | export TF_VAR_name_dot_com_token="theactualvalue" 31 | export BUCKET_TF_STATE="yourbucketname" 32 | export AWS_ACCESS_KEY_ID="theactualvalue" 33 | export AWS_SECRET_ACCESS_KEY="theactualvalue" 34 | python3 one_time_setup.py 35 | ``` 36 | 7. Inspect the generated `../records.${TF_VAR_name_dot_com_domain}.tf`, compare it to your domain in the name dot com UI. Make manual corrections as needed. 37 | 8. Inspect the generated `import.${TF_VAR_name_dot_com_domain}.sh`. Make manual corrections as needed. 38 | 9. Execute `cd ..` 39 | 10. Execute `terraform init -backend-config="bucket=$BUCKET_TF_STATE"` 40 | 11. Execute `bash setup/import.${TF_VAR_name_dot_com_domain}.sh` 41 | 12. Execute `terraform plan` 42 | 13. 
Execute `terraform apply` 43 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_telegraf/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install deps 2 | ansible.builtin.apt: 3 | lock_timeout: 120 4 | update_cache: true 5 | pkg: 6 | - gpg 7 | 8 | - name: Download influx gpg key 9 | ansible.builtin.get_url: 10 | url: https://repos.influxdata.com/influxdata-archive_compat.key 11 | dest: /root/influxdata-archive_compat.key 12 | mode: "600" 13 | 14 | - name: Check influx gpg key 15 | ansible.builtin.command: 16 | chdir: /root 17 | cmd: bash -c "echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c" 18 | creates: /tmp/fake_for_linter 19 | 20 | - name: Add influx gpg key 21 | ansible.builtin.command: 22 | chdir: /root 23 | cmd: bash -c 'cat influxdata-archive_compat.key | gpg --dearmor > /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg' 24 | creates: /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg 25 | 26 | - name: Add package source for influxdata 27 | ansible.builtin.lineinfile: 28 | path: /etc/apt/sources.list.d/influxdata.list 29 | line: "deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main" 30 | create: true 31 | mode: "644" 32 | 33 | - name: Install telegraf 34 | ansible.builtin.apt: 35 | lock_timeout: 120 36 | update_cache: true 37 | pkg: 38 | - telegraf 39 | 40 | - name: Telegraf knot 41 | ansible.builtin.include_tasks: 42 | file: telegraf_knot.yaml 43 | when: telegraf_knot != "" 44 | 45 | - name: Telegraf Config 46 | ansible.builtin.template: 47 | src: telegraf.conf.j2 48 | dest: /etc/telegraf/telegraf.conf 49 | mode: "644" 50 | 51 | - name: Allow restarting of telegraf 52 | ansible.builtin.lineinfile: 53 | path: /lib/systemd/system/telegraf.service 54 | search_string: Restart= 55 | line: "Restart=always" 56 | 
57 | - name: Restart and enable telegraf service 58 | ansible.builtin.systemd_service: 59 | state: restarted 60 | daemon_reload: true 61 | name: telegraf 62 | enabled: true 63 | 64 | - name: Datadog 65 | ansible.builtin.include_tasks: 66 | file: datadog.yaml 67 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/templates/iptables.j2: -------------------------------------------------------------------------------- 1 | *filter 2 | :INPUT ACCEPT [0:0] 3 | :FORWARD ACCEPT [0:0] 4 | :OUTPUT ACCEPT [0:0] 5 | 6 | -A INPUT -s 10.70.90.53/32 -d {{ INTERNAL_MGT_IP }}/32 -j ACCEPT 7 | -A INPUT -s 10.70.73.66/32 -d {{ INTERNAL_MGT_IP }}/32 -j ACCEPT 8 | -A INPUT -s 10.70.0.0/16 -d {{ INTERNAL_MGT_IP }}/32 -j ACCEPT 9 | 10 | {% if INTERNAL_LISTEN_IP != "" %} 11 | -A INPUT -d {{ INTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 53 -j ACCEPT 12 | -A INPUT -d {{ INTERNAL_LISTEN_IP }}/32 -p udp -m udp --dport 53 -j ACCEPT 13 | -A INPUT -d {{ INTERNAL_LISTEN_IP }}/32 -p icmp --icmp-type echo-request -j ACCEPT 14 | {% if DOH_SERVER != "" %} 15 | -A INPUT -d {{ INTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 443 -j ACCEPT 16 | {% endif %} 17 | -A INPUT -d {{ INTERNAL_LISTEN_IP }}/32 -j DROP 18 | {% endif %} 19 | 20 | {% if EXTERNAL_LISTEN_IP != "" %} 21 | -A INPUT -d {{ EXTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 53 -j ACCEPT 22 | -A INPUT -d {{ EXTERNAL_LISTEN_IP }}/32 -p udp -m udp --dport 53 -j ACCEPT 23 | {% if DOH_SERVER != "" %} 24 | -A INPUT -d {{ EXTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 443 -j ACCEPT 25 | {% endif %} 26 | -A INPUT -d {{ EXTERNAL_LISTEN_IP }}/32 -j DROP 27 | {% endif %} 28 | 29 | # Catch all 30 | -A INPUT -p tcp --dport 5355 -j DROP 31 | -A INPUT -p udp --dport 5355 -j DROP 32 | -A INPUT -p tcp --dport 53 -j DROP 33 | -A INPUT -p udp --dport 53 -j DROP 34 | -A INPUT -p tcp --dport 443 -j DROP 35 | -A INPUT -p tcp --dport 22 -j DROP 36 | -A INPUT -p tcp --dport 25 -j DROP 37 | 38 | -A FORWARD -s 
10.70.90.53/32 -d {{ INTERNAL_MGT_IP }}/32 -j ACCEPT 39 | 40 | {% if INTERNAL_LISTEN_IP != "" %} 41 | -A FORWARD -d {{ INTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 53 -j ACCEPT 42 | -A FORWARD -d {{ INTERNAL_LISTEN_IP }}/32 -p udp -m udp --dport 53 -j ACCEPT 43 | {% if DOH_SERVER != "" %} 44 | -A FORWARD -d {{ INTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 443 -j ACCEPT 45 | {% endif %} 46 | -A FORWARD -d {{ INTERNAL_LISTEN_IP }}/32 -j DROP 47 | {% endif %} 48 | 49 | {% if EXTERNAL_LISTEN_IP != "" %} 50 | -A FORWARD -d {{ EXTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 53 -j ACCEPT 51 | -A FORWARD -d {{ EXTERNAL_LISTEN_IP }}/32 -p udp -m udp --dport 53 -j ACCEPT 52 | {% if DOH_SERVER != "" %} 53 | -A FORWARD -d {{ EXTERNAL_LISTEN_IP }}/32 -p tcp -m tcp --dport 443 -j ACCEPT 54 | {% endif %} 55 | -A FORWARD -d {{ EXTERNAL_LISTEN_IP }}/32 -j DROP 56 | {% endif %} 57 | 58 | COMMIT -------------------------------------------------------------------------------- /infra/terraform/dns.tf: -------------------------------------------------------------------------------- 1 | module "some_mesh_dns_servers" { 2 | source = "./mesh_dns_servers" 3 | 4 | mesh_dns_local_password = var.mesh_dns_local_password 5 | influx_db_token = var.influx_db_token 6 | proxmox_node = var.proxmox_node 7 | proxmox_template_image = "debian-cloud" 8 | proxmox_storage_location = var.proxmox_storage_location 9 | dns_local_user = "debian" 10 | dns_auth_mgt_ip = var.dns_auth_mgt_ip 11 | dns_rec_mgt_ip = var.dns_rec_mgt_ip 12 | dns_auth_router_ip = var.dns_auth_router_ip 13 | dns_rec_router_ip = var.dns_rec_router_ip 14 | dns_auth_internal_ip = var.dns_auth_internal_ip 15 | dns_rec_internal_ip = var.dns_rec_internal_ip 16 | dns_auth_external_ip = var.dns_auth_external_ip 17 | dns_rec_external_ip = var.dns_rec_external_ip 18 | dns_rec_outgoing_ip = var.dns_rec_outgoing_ip 19 | dns_mgt_network_host_identifier = var.dns_mgt_network_host_identifier 20 | dns_mgt_gateway = var.dns_mgt_gateway 21 | dns_ssh_key_name
= "dns_ed25519" 22 | hostname_prefix = var.hostname_prefix 23 | hostname_count_offset = var.hostname_count_offset 24 | recursive_cores = var.recursive_cores 25 | recursive_sockets = var.recursive_sockets 26 | recursive_memory = var.recursive_memory 27 | vm_nic = var.vm_nic 28 | datadog_api_key = var.datadog_api_key 29 | datadog_site = var.datadog_site 30 | dns_cookie_secret = var.dns_cookie_secret 31 | tsig_key_k8s_prod1 = var.tsig_key_k8s_prod1 32 | tsig_key_k8s_prod2 = var.tsig_key_k8s_prod2 33 | tsig_key_k8s_dev3 = var.tsig_key_k8s_dev3 34 | tsig_key_jon = var.tsig_key_jon 35 | tsig_key_10_r630_01 = var.tsig_key_10_r630_01 36 | tsig_key_713_r640_01 = var.tsig_key_713_r640_01 37 | tsig_key_ha = var.tsig_key_ha 38 | tsig_key_doh = var.tsig_key_doh 39 | tsig_key_nn = var.tsig_key_nn 40 | enable_doh = var.enable_doh 41 | main_auth_server_ip = var.main_auth_server_ip 42 | mesh_stub_resolver = var.mesh_stub_resolver 43 | bird_network = var.bird_network 44 | } 45 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_recursive/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install deps 2 | ansible.builtin.apt: 3 | lock_timeout: 120 4 | update_cache: true 5 | pkg: 6 | - apt-transport-https 7 | - ca-certificates 8 | - dnsutils 9 | 10 | - name: Download gpg key 11 | ansible.builtin.get_url: 12 | url: https://pkg.labs.nic.cz/gpg 13 | dest: /usr/share/keyrings/cznic-labs-pkg.gpg 14 | mode: "644" 15 | 16 | - name: Add package source 17 | ansible.builtin.lineinfile: 18 | path: /etc/apt/sources.list.d/cznic-labs-knot-dns.list 19 | line: "deb [signed-by=/usr/share/keyrings/cznic-labs-pkg.gpg] https://pkg.labs.nic.cz/knot-resolver bookworm main" 20 | create: true 21 | mode: "644" 22 | 23 | - name: Install knot 24 | ansible.builtin.apt: 25 | lock_timeout: 120 26 | update_cache: true 27 | pkg: 28 | - knot-resolver 29 | - knot-dnsutils 30 | - 
knot-resolver-module-http 31 | 32 | - name: Mount knot cache as tmpfs 33 | ansible.posix.mount: 34 | path: /var/cache/knot-resolver-cache 35 | src: tmpfs 36 | fstype: tmpfs 37 | opts: rw,size=2G,uid=knot-resolver,gid=knot-resolver,nosuid,nodev,noexec,mode=0700 38 | dump: 0 39 | passno: 0 40 | state: mounted 41 | 42 | - name: Knot Resolver Config 43 | ansible.builtin.template: 44 | src: kresd.conf.j2 45 | dest: /etc/knot-resolver/kresd.conf 46 | owner: root 47 | group: knot-resolver 48 | mode: "640" 49 | 50 | - name: Enable restarts for kresd 51 | ansible.builtin.lineinfile: 52 | path: /lib/systemd/system/kresd@.service 53 | search_string: Restart= 54 | line: "Restart=always" 55 | 56 | - name: Setup DoH 57 | ansible.builtin.include_tasks: 58 | file: doh.yaml 59 | 60 | - name: Fix kres-cache-gc.service 61 | ansible.builtin.lineinfile: 62 | dest: /etc/systemd/system/kresd.target.wants/kres-cache-gc.service 63 | regexp: '^ExecStart=' 64 | line: 'ExecStart=/usr/sbin/kres-cache-gc -c /var/cache/knot-resolver-cache -d 1000' 65 | state: present 66 | 67 | - name: Restart and enable kres-cache-gc service 68 | ansible.builtin.systemd_service: 69 | name: kres-cache-gc 70 | state: restarted 71 | enabled: true 72 | daemon_reload: true # For kres-cache-gc + ospfd 73 | 74 | - name: Restart and enable knot-resolver service, $CPU_CORES - 1 75 | ansible.builtin.service: 76 | name: kresd@{{ item }} 77 | state: restarted 78 | enabled: true 79 | loop: "{{ range(1, ansible_processor_cores) | list }}" 80 | loop_control: 81 | pause: 5 82 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | name: Scorecard supply-chain security 2 | on: 3 | # For Branch-Protection check. Only the default branch is supported. 
See 4 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 5 | branch_protection_rule: 6 | # To guarantee Maintained check is occasionally updated. See 7 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 8 | schedule: 9 | - cron: '40 15 * * 6' 10 | push: 11 | branches: [ "main" ] 12 | 13 | # Declare default permissions as read only. 14 | permissions: read-all 15 | 16 | jobs: 17 | analysis: 18 | name: Scorecard analysis 19 | runs-on: ubuntu-latest 20 | # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled. 21 | if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request' 22 | permissions: 23 | # Needed to upload the results to code-scanning dashboard. 24 | security-events: write 25 | # Needed to publish results and get a badge (see publish_results below). 26 | id-token: write 27 | 28 | steps: 29 | - name: "Checkout code" 30 | uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 31 | with: 32 | persist-credentials: false 33 | 34 | - name: "Run analysis" 35 | uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 36 | with: 37 | results_file: results.sarif 38 | results_format: sarif 39 | # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: 40 | # - you want to enable the Branch-Protection check on a *public* repository, or 41 | # - you are installing Scorecard on a *private* repository 42 | # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. 43 | # repo_token: ${{ secrets.SCORECARD_TOKEN }} 44 | 45 | # Public repositories: 46 | # - Publish results to OpenSSF REST API for easy access by consumers 47 | # - Allows the repository to include the Scorecard badge. 48 | # - See https://github.com/ossf/scorecard-action#publishing-results. 
49 | publish_results: true 50 | 51 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF 52 | # format to the repository Actions tab. 53 | - name: "Upload artifact" 54 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 55 | with: 56 | name: SARIF file 57 | path: results.sarif 58 | retention-days: 5 59 | 60 | # Upload the results to GitHub's code scanning dashboard (optional). 61 | - name: "Upload to code-scanning" 62 | uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 63 | with: 64 | sarif_file: results.sarif 65 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_ospf/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Add WN 2 | ansible.posix.authorized_key: 3 | user: debian 4 | state: present 5 | key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBTf39rln9Gn4+xAq55izTsHaLrvwxe0KxdLdqN8AHV5 wilnil@willardpad" 6 | 7 | - name: Add AD 8 | ansible.posix.authorized_key: 9 | user: debian 10 | state: present 11 | key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINqMvlwLFwGENT5WMjytI2iwVlHPV2TX+gOgBXj9kw4k andrew@localhost" 12 | 13 | - name: Install deps 14 | ansible.builtin.apt: 15 | lock_timeout: 120 16 | update_cache: true 17 | pkg: 18 | - iptables-persistent 19 | - cron 20 | 21 | - name: Netplan dummy0 interface 22 | ansible.builtin.template: 23 | src: netplan_dummy0.yaml.j2 24 | dest: /etc/netplan/dummy0.yaml 25 | mode: "640" 26 | 27 | - name: Netplan dummy1 interface 28 | ansible.builtin.template: 29 | src: netplan_dummy1.yaml.j2 30 | dest: /etc/netplan/dummy1.yaml 31 | mode: "640" 32 | when: EXTERNAL_LISTEN_IP != "" 33 | 34 | - name: Netplan dummy2 interface 35 | ansible.builtin.template: 36 | src: netplan_dummy2.yaml.j2 37 | dest: /etc/netplan/dummy2.yaml 38 | mode: "640" 39 | 40 | - name: Netplan dummy3 interface 41 | ansible.builtin.template: 42 
| src: netplan_dummy3.yaml.j2 43 | dest: /etc/netplan/dummy3.yaml 44 | mode: "640" 45 | when: EXTERNAL_OUTGOING_IP != "" 46 | 47 | - name: Netplan eth0 interface 48 | ansible.builtin.template: 49 | src: netplan_50_cloud_init.yaml.j2 50 | dest: /etc/netplan/50-cloud-init.yaml 51 | mode: "640" 52 | 53 | - name: Iptables rules 54 | ansible.builtin.template: 55 | src: iptables.j2 56 | dest: /etc/iptables/rules.v4 57 | mode: "600" 58 | 59 | - name: Restore iptables rules 60 | ansible.builtin.command: 61 | cmd: "bash -c '/sbin/iptables-restore < /etc/iptables/rules.v4'" 62 | 63 | - name: Netplan apply 64 | ansible.builtin.command: 65 | cmd: "bash -c 'netplan apply && touch /tmp/netplan_applied'" 66 | creates: /tmp/netplan_applied 67 | 68 | - name: Restart and enable iptables service 69 | ansible.builtin.service: 70 | name: netfilter-persistent 71 | state: restarted 72 | enabled: true 73 | 74 | - name: Set net.ipv4.ip_forward 75 | ansible.posix.sysctl: 76 | name: net.ipv4.ip_forward 77 | value: '1' 78 | sysctl_set: true 79 | state: present 80 | reload: true 81 | 82 | - name: Import the bird_basic role from the nycmesh.common collection 83 | ansible.builtin.import_role: 84 | name: nycmesh.common.bird2_basic 85 | 86 | - name: Remove file 87 | ansible.builtin.file: 88 | path: /etc/cron.d/frr_reload 89 | state: absent 90 | 91 | - name: Restart and enable cron service 92 | ansible.builtin.systemd_service: 93 | state: restarted 94 | name: cron 95 | enabled: true 96 | 97 | - name: Set net.ipv4.ip_forward again 98 | ansible.posix.sysctl: 99 | name: net.ipv4.ip_forward 100 | value: '1' 101 | sysctl_set: true 102 | state: present 103 | reload: true 104 | 105 | - name: Remove frr 106 | ansible.builtin.apt: 107 | name: frr 108 | state: absent 109 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_authoritative/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Download 
knot gpg key 2 | ansible.builtin.get_url: 3 | url: https://pkg.labs.nic.cz/gpg 4 | dest: /usr/share/keyrings/cznic-labs-pkg.gpg 5 | mode: "0644" 6 | owner: root 7 | group: root 8 | 9 | - name: Setup knot apt repo 10 | ansible.builtin.copy: 11 | src: cznic-labs-knot-dns.list 12 | dest: /etc/apt/sources.list.d/cznic-labs-knot-dns.list 13 | mode: "644" 14 | owner: root 15 | group: root 16 | 17 | - name: Pin knot package 18 | ansible.builtin.copy: 19 | src: knot.apt.preferences.txt 20 | dest: /etc/apt/preferences.d/knot 21 | mode: "644" 22 | owner: root 23 | group: root 24 | 25 | - name: Install deps 26 | ansible.builtin.apt: 27 | lock_timeout: 240 28 | update_cache: true 29 | pkg: 30 | - apt-transport-https 31 | - ca-certificates 32 | - git 33 | - cron 34 | - python3-pip 35 | - python3.11-venv 36 | - dnsutils 37 | - knot=3.4.6-cznic.1~bookworm 38 | 39 | - name: Add WN 40 | ansible.posix.authorized_key: 41 | user: debian 42 | state: present 43 | key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBTf39rln9Gn4+xAq55izTsHaLrvwxe0KxdLdqN8AHV5 wilnil@willardpad" 44 | 45 | - name: Add AD 46 | ansible.posix.authorized_key: 47 | user: debian 48 | state: present 49 | key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINqMvlwLFwGENT5WMjytI2iwVlHPV2TX+gOgBXj9kw4k andrew@localhost" 50 | 51 | - name: Knot DNS Config 52 | ansible.builtin.template: 53 | src: knot.conf.j2 54 | dest: /etc/knot/knot.conf 55 | owner: root 56 | group: knot 57 | mode: "640" 58 | 59 | - name: Make sure commit file does not exist 60 | ansible.builtin.file: 61 | path: /var/lib/knot/commit 62 | state: absent 63 | 64 | - name: Make sure file does not exist in the directory's place 65 | ansible.builtin.file: 66 | path: /var/lib/knot/zones 67 | state: absent 68 | when: "'/var/lib/knot/zones' is not ansible.builtin.directory" 69 | 70 | - name: Create a directory if it does not exist 71 | ansible.builtin.file: 72 | path: /var/lib/knot/zones 73 | state: directory 74 | owner: knot 75 | group: knot 76 | mode: "760" 77 | 78 | - name: 
Git checkout 79 | ansible.builtin.git: 80 | repo: "https://github.com/nycmeshnet/nycmesh-dns.git" 81 | dest: /root/nycmesh-dns 82 | version: main 83 | 84 | - name: Run deploy script 85 | ansible.builtin.command: 86 | cmd: bash -c "cd /root/nycmesh-dns/ && bash /root/nycmesh-dns/deploy_knot.sh" 87 | 88 | - name: Allow restarting of knot 89 | ansible.builtin.lineinfile: 90 | path: /lib/systemd/system/knot.service 91 | search_string: Restart= 92 | line: "Restart=always" 93 | 94 | - name: Restart and enable knot service 95 | ansible.builtin.systemd_service: 96 | name: knot 97 | state: restarted 98 | enabled: true 99 | daemon_reload: true 100 | 101 | - name: Crontab 102 | ansible.builtin.lineinfile: 103 | path: /etc/cron.d/knot_zone_update 104 | line: "*/10 * * * * root cd /root/nycmesh-dns && bash /root/nycmesh-dns/deploy_knot.sh 2>&1 > /dev/null" 105 | create: true 106 | mode: "644" 107 | 108 | - name: Restart and enable cron service 109 | ansible.builtin.service: 110 | name: cron 111 | state: restarted 112 | enabled: true 113 | -------------------------------------------------------------------------------- /.github/workflows/deploy_dns_environment.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy DNS Server 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | environment: 7 | required: true 8 | type: string 9 | 10 | permissions: read-all 11 | 12 | env: 13 | # Secrets 14 | TF_VAR_proxmox_host: ${{ secrets.TF_VAR_PROXMOX_HOST }} 15 | TF_VAR_proxmox_token_id: ${{ secrets.TF_VAR_PROXMOX_TOKEN_ID }} 16 | TF_VAR_proxmox_token_secret: ${{ secrets.TF_VAR_PROXMOX_TOKEN_SECRET }} 17 | TF_VAR_mesh_dns_local_password: ${{ secrets.TF_VAR_MESH_DNS_LOCAL_PASSWORD }} 18 | TF_VAR_influx_db_token: ${{ secrets.TF_VAR_INFLUX_DB_TOKEN }} 19 | TF_VAR_datadog_api_key: ${{ secrets.TF_VAR_DATADOG_API_KEY }} 20 | TF_VAR_dns_cookie_secret: "${{ secrets.TF_VAR_DNS_COOKIE_SECRET }}" 21 | TF_VAR_tsig_key_k8s_prod1: "${{ 
secrets.TF_VAR_TSIG_KEY_K8S_PROD1 }}" 22 | TF_VAR_tsig_key_k8s_prod2: "${{ secrets.TF_VAR_TSIG_KEY_K8S_PROD2 }}" 23 | TF_VAR_tsig_key_k8s_dev3: "${{ secrets.TF_VAR_TSIG_KEY_K8S_DEV3 }}" 24 | TF_VAR_tsig_key_ha: "${{ secrets.TF_VAR_TSIG_KEY_HOMEASSISTANT }}" 25 | TF_VAR_tsig_key_jon: "${{ secrets.TF_VAR_TSIG_KEY_JON }}" 26 | TF_VAR_tsig_key_10_r630_01: "${{ secrets.TF_VAR_TSIG_KEY_10_R630_01 }}" 27 | TF_VAR_tsig_key_713_r640_01: "${{ secrets.TF_VAR_TSIG_KEY_713_R640_01 }}" 28 | TF_VAR_tsig_key_doh: "${{ secrets.TF_VAR_TSIG_KEY_DOH }}" 29 | TF_VAR_tsig_key_nn: "${{ secrets.TF_VAR_TSIG_KEY_NN }}" 30 | # Credentials for deployment to AWS 31 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 32 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 33 | # S3 bucket for the Terraform state 34 | BUCKET_TF_STATE: ${{ secrets.BUCKET_TF_STATE}} 35 | 36 | jobs: 37 | deploy: 38 | runs-on: ubuntu-latest 39 | environment: ${{ inputs.environment }} 40 | steps: 41 | - name: Checkout 42 | uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 43 | 44 | - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #@v5 45 | with: 46 | python-version: '3.11' 47 | 48 | - name: Setup ansible 49 | run: pip install --require-hashes -r requirements.txt && export PATH="$HOME/.local/bin:$PATH" && ansible-galaxy collection install --force -r infra/ansible/roles/requirements.yml 50 | 51 | - name: Setup Terraform with specified version on the runner 52 | uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # @v3 53 | with: 54 | terraform_version: 1.8.3 55 | 56 | - name: Setup WireGuard 57 | run: | 58 | sudo apt-get update && sudo apt-get install -y wireguard 59 | echo "${{ secrets.WIREGUARD_PRIVATE_KEY }}" > privatekey 60 | sudo ip link add dev wg0 type wireguard 61 | sudo ip address add dev wg0 ${{ secrets.WIREGUARD_OVERLAY_NETWORK_IP }} peer ${{ secrets.WIREGUARD_PEER }} 62 | sudo wg set wg0 listen-port 48123 private-key privatekey peer 
${{ secrets.WIREGUARD_PEER_PUBLIC_KEY }} allowed-ips 0.0.0.0/0 endpoint ${{ secrets.WIREGUARD_ENDPOINT }} 63 | sudo ip link set up dev wg0 64 | rm privatekey 65 | 66 | - name: Setup backend 67 | run: | 68 | echo "bucket = \"$BUCKET_TF_STATE\"" > backend.tfvars 69 | echo "key = \"terraform/state/dns-${{ inputs.environment }}.tfstate\"" >> backend.tfvars 70 | working-directory: ./infra/terraform/ 71 | 72 | - name: Terraform init 73 | id: init 74 | run: terraform init -backend-config=backend.tfvars 75 | working-directory: ./infra/terraform/ 76 | 77 | - name: Terraform validate 78 | run: terraform validate 79 | working-directory: ./infra/terraform/ 80 | 81 | - name: Terraform Apply 82 | run: | 83 | echo "${{ secrets.TF_VAR_MESH_DNS_SSH_KEY }}" > dns_ed25519 84 | echo "${{ secrets.TF_VAR_MESH_DNS_PUB_KEY }}" > dns_ed25519.pub 85 | chmod 600 dns_ed25519 86 | chmod 600 dns_ed25519.pub 87 | terraform apply -auto-approve -input=false -var-file=${{ inputs.environment }}.tfvars 88 | working-directory: ./infra/terraform/ 89 | 90 | - name: Run playbook 91 | run: sleep 45 && export PATH="$HOME/.local/bin:$PATH" && ansible-playbook -i inventory.yaml dns_server.yaml 92 | working-directory: ./infra/ansible/ 93 | -------------------------------------------------------------------------------- /infra/terraform/mesh_dns_servers/ansible.tf: -------------------------------------------------------------------------------- 1 | resource "ansible_group" "knot-recursive" { 2 | name = "knot-recursive" 3 | variables = { 4 | ansible_user = var.dns_local_user 5 | ansible_ssh_private_key_file = "../terraform/${var.dns_ssh_key_name}" 6 | ansible_ssh_common_args = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" 7 | telegraf_knot = "" 8 | telegraf_kresd = "enable" 9 | DOH_SERVER = var.enable_doh 10 | MAIN_AUTH_SERVER_DOH = var.main_auth_server_ip 11 | TSIG_KEY_DOH = var.tsig_key_doh 12 | MESH_STUB_RESOLVER = var.mesh_stub_resolver 13 | } 14 | } 15 | 16 | resource "ansible_group" 
"knot-authoritative" { 17 | name = "knot-authoritative" 18 | variables = { 19 | ansible_user = var.dns_local_user 20 | ansible_ssh_private_key_file = "../terraform/${var.dns_ssh_key_name}" 21 | ansible_ssh_common_args = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" 22 | telegraf_knot = "enable" 23 | telegraf_kresd = "" 24 | DOH_SERVER = "" 25 | DNS_COOKIE_SECRET = var.dns_cookie_secret 26 | TSIG_KEY_K8S_PROD1 = var.tsig_key_k8s_prod1 27 | TSIG_KEY_K8S_PROD2 = var.tsig_key_k8s_prod2 28 | TSIG_KEY_K8S_DEV3 = var.tsig_key_k8s_dev3 29 | TSIG_KEY_JON = var.tsig_key_jon 30 | TSIG_KEY_10_R630_01 = var.tsig_key_10_r630_01 31 | TSIG_KEY_713_R640_01 = var.tsig_key_713_r640_01 32 | TSIG_KEY_DOH = var.tsig_key_doh 33 | TSIG_KEY_NN = var.tsig_key_nn 34 | TSIG_KEY_HOMEASSISTANT = var.tsig_key_ha 35 | } 36 | } 37 | 38 | resource "ansible_host" "rec-dns-mgt" { 39 | count = length(var.dns_rec_mgt_ip) 40 | name = var.dns_rec_mgt_ip[count.index] 41 | groups = [ansible_group.knot-recursive.name] 42 | variables = { 43 | SERVER_HOSTNAME = "${var.hostname_prefix}-dns-rec-${sum([1, count.index, var.hostname_count_offset])}" 44 | bird_router_id = var.dns_rec_router_ip[count.index] 45 | bird_network = var.bird_network 46 | BIRD_OSPF_COST = var.bird_ospf_cost 47 | EXTERNAL_LISTEN_IP = var.dns_rec_external_ip[count.index] 48 | EXTERNAL_OUTGOING_IP = var.dns_rec_outgoing_ip[count.index] 49 | INTERNAL_NETWORK_HOST_IDENTIFIER = var.dns_mgt_network_host_identifier 50 | INTERNAL_LISTEN_IP = var.dns_rec_internal_ip[count.index] 51 | INTERNAL_MGT_IP = var.dns_rec_mgt_ip[count.index] 52 | INTERNAL_MGT_DG = var.dns_mgt_gateway 53 | INFLUX_DB_TOKEN = var.influx_db_token 54 | LOCAL_PASSWORD = var.mesh_dns_local_password 55 | DATADOG_API_KEY = var.datadog_api_key 56 | DATADOG_SITE = var.datadog_site 57 | CERTBOT_UPDATE_HOUR = tostring(sum([1, count.index, var.hostname_count_offset])) 58 | } 59 | } 60 | 61 | resource "ansible_host" "auth-dns-mgt" { 62 | count = length(var.dns_auth_mgt_ip) 
63 | name = var.dns_auth_mgt_ip[count.index] 64 | groups = [ansible_group.knot-authoritative.name] 65 | variables = { 66 | SERVER_HOSTNAME = "${var.hostname_prefix}-dns-auth-${sum([1, count.index, var.hostname_count_offset])}" 67 | bird_router_id = var.dns_auth_router_ip[count.index] 68 | bird_network = var.bird_network 69 | BIRD_OSPF_COST = var.bird_ospf_cost 70 | EXTERNAL_LISTEN_IP = var.dns_auth_external_ip[count.index] 71 | EXTERNAL_OUTGOING_IP = "" 72 | INTERNAL_NETWORK_HOST_IDENTIFIER = var.dns_mgt_network_host_identifier 73 | INTERNAL_LISTEN_IP = var.dns_auth_internal_ip[count.index] 74 | INTERNAL_MGT_IP = var.dns_auth_mgt_ip[count.index] 75 | INTERNAL_MGT_DG = var.dns_mgt_gateway 76 | INFLUX_DB_TOKEN = var.influx_db_token 77 | LOCAL_PASSWORD = var.mesh_dns_local_password 78 | DATADOG_API_KEY = var.datadog_api_key 79 | DATADOG_SITE = var.datadog_site 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_authoritative/templates/knot.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | server: 3 | {% if INTERNAL_LISTEN_IP != "" %} listen: {{ INTERNAL_LISTEN_IP }}@53 4 | {% endif %} 5 | {% if EXTERNAL_LISTEN_IP != "" %} listen: {{ EXTERNAL_LISTEN_IP }}@53 6 | {% endif %} 7 | 8 | log: 9 | - target: syslog 10 | any: info 11 | 12 | mod-cookies: 13 | - id: default 14 | secret: {{ DNS_COOKIE_SECRET }} 15 | 16 | mod-rrl: 17 | - id: default 18 | rate-limit: 200 19 | slip: 2 20 | 21 | template: 22 | - id: default 23 | global-module: mod-cookies/default 24 | global-module: mod-rrl/default 25 | global-module: mod-stats 26 | storage: /var/lib/knot/zones 27 | 28 | key: 29 | - id: k8s.lb.prod1 30 | algorithm: hmac-sha512 31 | secret: {{ TSIG_KEY_K8S_PROD1 }} 32 | - id: k8s.lb.prod2 33 | algorithm: hmac-sha512 34 | secret: {{ TSIG_KEY_K8S_PROD2 }} 35 | - id: k8s.lb.dev3 36 | algorithm: hmac-sha512 37 | secret: {{ TSIG_KEY_K8S_DEV3 }} 38 | - id: 
jon.prox.mesh.nycmesh.net. 39 | algorithm: hmac-sha512 40 | secret: {{ TSIG_KEY_JON }} 41 | - id: nycmesh-10-r630-01.prox.mesh.nycmesh.net. 42 | algorithm: hmac-sha512 43 | secret: {{ TSIG_KEY_10_R630_01 }} 44 | - id: nycmesh-713-r640-01.prox.mesh.nycmesh.net. 45 | algorithm: hmac-sha512 46 | secret: {{ TSIG_KEY_713_R640_01 }} 47 | - id: doh.mesh.nycmesh.net. 48 | algorithm: hmac-sha512 49 | secret: {{ TSIG_KEY_DOH }} 50 | - id: nn.mesh.nycmesh.net. 51 | algorithm: hmac-sha512 52 | secret: {{ TSIG_KEY_NN }} 53 | - id: ha.mesh.nycmesh.net. 54 | algorithm: hmac-sha512 55 | secret: {{ TSIG_KEY_HOMEASSISTANT }} 56 | 57 | acl: 58 | - id: acl-update-building 59 | action: update 60 | update-type: [TXT] 61 | update-owner-match: equal 62 | update-owner-name: acme-challenge.building.mesh.nycmesh.net. 63 | key: k8s.lb.prod1 64 | - id: acl-update-buildingdev 65 | action: update 66 | update-type: [TXT] 67 | update-owner-match: equal 68 | update-owner-name: acme-challenge.building-dev.mesh.nycmesh.net. 69 | key: k8s.lb.dev3 70 | - id: acl-update-jamesinternaldev3 71 | action: update 72 | update-type: [TXT] 73 | update-owner-match: equal 74 | update-owner-name: acme-challenge.building.mesh.nycmesh.net. 75 | key: k8s.lb.dev3 76 | - id: acl-update-jamesinternalprodtwo 77 | action: update 78 | update-type: [TXT] 79 | update-owner-match: equal 80 | update-owner-name: acme-challenge.jamesinternalprodtwo.mesh.nycmesh.net. 81 | key: k8s.lb.prod2 82 | - id: acl-update-jon 83 | action: update 84 | update-type: [TXT] 85 | update-owner-match: equal 86 | update-owner-name: acme-challenge.jon.prox.mesh.nycmesh.net. 87 | key: jon.prox.mesh.nycmesh.net. 88 | - id: acl-update-nycmesh-10-r630-01 89 | action: update 90 | update-type: [TXT] 91 | update-owner-match: equal 92 | update-owner-name: acme-challenge.nycmesh-10-r630-01.prox.mesh.nycmesh.net. 93 | key: nycmesh-10-r630-01.prox.mesh.nycmesh.net. 
94 | - id: acl-update-nycmesh-713-r640-01 95 | action: update 96 | update-type: [TXT] 97 | update-owner-match: equal 98 | update-owner-name: acme-challenge.nycmesh-713-r640-01.prox.mesh.nycmesh.net. 99 | key: nycmesh-713-r640-01.prox.mesh.nycmesh.net. 100 | - id: acl-update-doh 101 | action: update 102 | update-type: [TXT] 103 | update-owner-match: equal 104 | update-owner-name: acme-challenge.doh.mesh.nycmesh.net. 105 | key: doh.mesh.nycmesh.net. 106 | - id: acl-update-nn 107 | action: update 108 | update-type: [TXT] 109 | update-owner-match: pattern 110 | update-owner-name: acme-challenge.*.nn.mesh.nycmesh.net. 111 | key: nn.mesh.nycmesh.net. 112 | - id: acl-update-ha 113 | action: update 114 | update-type: [TXT] 115 | update-owner-match: equal 116 | update-owner-name: acme-challenge.ha.mesh.nycmesh.net. 117 | key: ha.mesh.nycmesh.net. 118 | - id: acl-update-devairflow 119 | action: update 120 | update-type: [TXT] 121 | update-owner-match: equal 122 | update-owner-name: acme-challenge.devairflow.mesh.nycmesh.net. 123 | key: k8s.lb.dev3 124 | - id: acl-update-airflow 125 | action: update 126 | update-type: [TXT] 127 | update-owner-match: equal 128 | update-owner-name: acme-challenge.airflow.mesh.nycmesh.net. 129 | key: k8s.lb.prod1 130 | - id: acl-update-jenkins 131 | action: update 132 | update-type: [TXT] 133 | update-owner-match: equal 134 | update-owner-name: acme-challenge.jenkins.mesh.nycmesh.net. 
135 | key: k8s.lb.prod1 136 | 137 | zone: 138 | - domain: mesh 139 | - domain: mesh.nycmesh.net 140 | file: mesh.zone 141 | - domain: doh.mesh.nycmesh.net 142 | file: doh.mesh.nycmesh.net.zone 143 | acl: acl-update-doh 144 | - domain: nn.mesh.nycmesh.net 145 | file: nn.zone 146 | acl: acl-update-nn 147 | - domain: ha.mesh.nycmesh.net 148 | file: ha.mesh.nycmesh.net.zone 149 | acl: acl-update-ha 150 | - domain: devairflow.mesh.nycmesh.net 151 | file: devairflow.mesh.nycmesh.net.zone 152 | acl: acl-update-devairflow 153 | - domain: airflow.mesh.nycmesh.net 154 | file: airflow.mesh.nycmesh.net.zone 155 | acl: acl-update-airflow 156 | - domain: prox.mesh.nycmesh.net 157 | file: prox.mesh.nycmesh.net.zone 158 | acl: [acl-update-jon, acl-update-nycmesh-10-r630-01, acl-update-nycmesh-713-r640-01] 159 | - domain: building.mesh.nycmesh.net 160 | file: building.mesh.nycmesh.net.zone 161 | acl: acl-update-building 162 | - domain: building-dev.mesh.nycmesh.net 163 | file: building-dev.mesh.nycmesh.net.zone 164 | acl: acl-update-buildingdev 165 | - domain: jamesinternalprodtwo.mesh.nycmesh.net 166 | file: jamesinternalprodtwo.mesh.nycmesh.net.zone 167 | acl: acl-update-jamesinternalprodtwo 168 | - domain: nycmeshconnect.com 169 | file: nycmeshconnect.com.zone 170 | - domain: nycmeshconnect.net 171 | file: nycmeshconnect.net.zone 172 | - domain: themesh.nyc 173 | file: themesh.nyc.zone 174 | - domain: 10.in-addr.arpa 175 | - domain: 59.167.199.in-addr.arpa 176 | - domain: jenkins.mesh.nycmesh.net 177 | file: jenkins.mesh.nycmesh.net.zone 178 | acl: acl-update-jenkins 179 | -------------------------------------------------------------------------------- /infra/ansible/roles/knot_recursive/templates/kresd.conf.j2: -------------------------------------------------------------------------------- 1 | -- Network interface configuration 2 | net.listen('127.0.0.1', 53, { kind = 'dns' }) 3 | net.listen('127.0.0.1', 853, { kind = 'tls' }) 4 | --net.listen('127.0.0.1', 443, { kind = 'doh2' 
}) 5 | net.listen('127.0.0.1', 9100, {kind = 'webmgmt'}) 6 | 7 | -- INTERNAL_LISTEN_IP 8 | net.listen('{{ INTERNAL_LISTEN_IP }}', 53, { kind = 'dns' }) 9 | net.listen('{{ INTERNAL_LISTEN_IP }}', 443, { kind = 'doh2' }) 10 | 11 | {% if EXTERNAL_LISTEN_IP != "" %} 12 | -- EXTERNAL_LISTEN_IP 13 | net.listen('{{ EXTERNAL_LISTEN_IP }}', 53, { kind = 'dns' }) 14 | net.listen('{{ EXTERNAL_LISTEN_IP }}', 443, { kind = 'doh2' }) 15 | {% endif %} 16 | 17 | -- No ipv6 18 | net.ipv6 = false 19 | 20 | {% if EXTERNAL_OUTGOING_IP != "" %} 21 | -- EXTERNAL_OUTGOING_IP 22 | -- Not until things are sorted out with the delegated subdomains, but keep the IPs 23 | --net.outgoing_v4('{{ EXTERNAL_OUTGOING_IP }}') 24 | {% endif %} 25 | 26 | -- Load useful modules 27 | modules = { 28 | 'hints > iterate', -- Allow loading /etc/hosts or custom root hints 29 | 'stats', -- Track internal statistics 30 | 'http', -- Stats in metrics format 31 | 'policy', -- Allow/deny requests 32 | 'view', -- Consider query sources 33 | 'nsid', -- Name Server Identifier (NSID) 34 | } 35 | 36 | -- Set NSID 37 | nsid.name('{{ SERVER_HOSTNAME }}') 38 | 39 | {% if DOH_SERVER != "" %} 40 | net.tls("/etc/knot-resolver/server-cert.pem", "/etc/knot-resolver/server-key.pem") 41 | {% endif %} 42 | 43 | -- Subdomains delegated outside of "this" server from within the mesh 44 | view:addr('10.0.0.0/8', policy.suffix(policy.STUB('10.70.90.174'), policy.todnames({'em.mesh.', 'em.mesh.nycmesh.net.'}))) 45 | view:addr('23.158.16.0/24', policy.suffix(policy.STUB('10.70.90.174'), policy.todnames({'em.mesh.', 'em.mesh.nycmesh.net.'}))) 46 | view:addr('199.167.59.0/24', policy.suffix(policy.STUB('10.70.90.174'), policy.todnames({'em.mesh.', 'em.mesh.nycmesh.net.'}))) 47 | view:addr('199.170.132.0/24', policy.suffix(policy.STUB('10.70.90.174'), policy.todnames({'em.mesh.', 'em.mesh.nycmesh.net.'}))) 48 | view:addr('208.68.5.0/24', policy.suffix(policy.STUB('10.70.90.174'), policy.todnames({'em.mesh.', 'em.mesh.nycmesh.net.'}))) 49 | 
50 | view:addr('10.0.0.0/8', policy.suffix(policy.STUB('10.70.132.1'), policy.todnames({'zrg.mesh.', 'zrg.mesh.nycmesh.net.', 'n363.mesh.', 'n363.mesh.nycmesh.net.'}))) 51 | view:addr('23.158.16.0/24', policy.suffix(policy.STUB('10.70.132.1'), policy.todnames({'zrg.mesh.', 'zrg.mesh.nycmesh.net.', 'n363.mesh.', 'n363.mesh.nycmesh.net.'}))) 52 | view:addr('199.167.59.0/24', policy.suffix(policy.STUB('10.70.132.1'), policy.todnames({'zrg.mesh.', 'zrg.mesh.nycmesh.net.', 'n363.mesh.', 'n363.mesh.nycmesh.net.'}))) 53 | view:addr('199.170.132.0/24', policy.suffix(policy.STUB('10.70.132.1'), policy.todnames({'zrg.mesh.', 'zrg.mesh.nycmesh.net.', 'n363.mesh.', 'n363.mesh.nycmesh.net.'}))) 54 | view:addr('208.68.5.0/24', policy.suffix(policy.STUB('10.70.132.1'), policy.todnames({'zrg.mesh.', 'zrg.mesh.nycmesh.net.', 'n363.mesh.', 'n363.mesh.nycmesh.net.'}))) 55 | 56 | view:addr('10.0.0.0/8', policy.suffix(policy.STUB('10.70.90.50'), policy.todnames({'daniel.mesh.', 'daniel.mesh.nycmesh.net.'}))) 57 | view:addr('23.158.16.0/24', policy.suffix(policy.STUB('10.70.90.50'), policy.todnames({'daniel.mesh.', 'daniel.mesh.nycmesh.net.'}))) 58 | view:addr('199.167.59.0/24', policy.suffix(policy.STUB('10.70.90.50'), policy.todnames({'daniel.mesh.', 'daniel.mesh.nycmesh.net.'}))) 59 | view:addr('199.170.132.0/24', policy.suffix(policy.STUB('10.70.90.50'), policy.todnames({'daniel.mesh.', 'daniel.mesh.nycmesh.net.'}))) 60 | view:addr('208.68.5.0/24', policy.suffix(policy.STUB('10.70.90.50'), policy.todnames({'daniel.mesh.', 'daniel.mesh.nycmesh.net.'}))) 61 | 62 | view:addr('10.0.0.0/8', policy.suffix(policy.STUB('54.161.165.190'), policy.todnames({'andrew.mesh.', 'andrew.mesh.nycmesh.net.'}))) 63 | view:addr('23.158.16.0/24', policy.suffix(policy.STUB('54.161.165.190'), policy.todnames({'andrew.mesh.', 'andrew.mesh.nycmesh.net.'}))) 64 | view:addr('199.167.59.0/24', policy.suffix(policy.STUB('54.161.165.190'), policy.todnames({'andrew.mesh.', 'andrew.mesh.nycmesh.net.'}))) 65 | 
view:addr('199.170.132.0/24', policy.suffix(policy.STUB('54.161.165.190'), policy.todnames({'andrew.mesh.', 'andrew.mesh.nycmesh.net.'}))) 66 | view:addr('208.68.5.0/24', policy.suffix(policy.STUB('54.161.165.190'), policy.todnames({'andrew.mesh.', 'andrew.mesh.nycmesh.net.'}))) 67 | 68 | -- Mesh from mesh 69 | view:addr('10.0.0.0/8', policy.suffix(policy.STUB('{{ MESH_STUB_RESOLVER }}'), policy.todnames({'mesh.', 'mesh.nycmesh.net.'}))) 70 | view:addr('23.158.16.0/24', policy.suffix(policy.STUB('{{ MESH_STUB_RESOLVER }}'), policy.todnames({'mesh.', 'mesh.nycmesh.net.'}))) 71 | view:addr('199.167.59.0/24', policy.suffix(policy.STUB('{{ MESH_STUB_RESOLVER }}'), policy.todnames({'mesh.', 'mesh.nycmesh.net.'}))) 72 | view:addr('199.170.132.0/24', policy.suffix(policy.STUB('{{ MESH_STUB_RESOLVER }}'), policy.todnames({'mesh.', 'mesh.nycmesh.net.'}))) 73 | view:addr('208.68.5.0/24', policy.suffix(policy.STUB('{{ MESH_STUB_RESOLVER }}'), policy.todnames({'mesh.', 'mesh.nycmesh.net.'}))) 74 | 75 | -- Allow all from mesh 76 | view:addr('127.0.0.0/8', policy.all(policy.PASS)) 77 | view:addr('10.0.0.0/8', policy.all(policy.PASS)) 78 | view:addr('23.158.16.0/24', policy.all(policy.PASS)) 79 | view:addr('199.167.59.0/24', policy.all(policy.PASS)) 80 | view:addr('199.170.132.0/24', policy.all(policy.PASS)) 81 | view:addr('208.68.5.0/24', policy.all(policy.PASS)) 82 | 83 | -- Do not answer requests from bogons 84 | view:addr('0.0.0.0/8', policy.all(policy.NO_ANSWER)) -- This 85 | view:addr('100.64.0.0/10', policy.all(policy.NO_ANSWER)) -- CGNAT 86 | view:addr('169.254.0.0/16', policy.all(policy.NO_ANSWER)) -- Local link 87 | view:addr('172.16.0.0/12', policy.all(policy.NO_ANSWER)) -- Private 88 | view:addr('192.0.0.0/24', policy.all(policy.NO_ANSWER)) -- Private 89 | view:addr('192.0.2.0/24', policy.all(policy.NO_ANSWER)) -- Test net 90 | view:addr('192.168.0.0/16', policy.all(policy.NO_ANSWER)) --Private 91 | view:addr('198.18.0.0/15', policy.all(policy.NO_ANSWER)) -- Private 
92 | view:addr('198.51.100.0/24', policy.all(policy.NO_ANSWER)) -- Test net 93 | view:addr('203.0.113.0/24', policy.all(policy.NO_ANSWER)) -- Test net 94 | view:addr('224.0.0.0/3', policy.all(policy.NO_ANSWER)) -- Multicast 95 | 96 | -- Deny other 97 | view:addr('0.0.0.0/0', policy.all(policy.REFUSE)) 98 | 99 | 100 | -- Cache size 101 | cache.storage = 'lmdb:///var/cache/knot-resolver-cache' 102 | cache.size = cache.fssize() - 1000*MB - 10*MB 103 | -------------------------------------------------------------------------------- /infra/terraform/vars.tf: -------------------------------------------------------------------------------- 1 | variable "proxmox_host" { 2 | type = string 3 | description = "ip/domain of the proxmox server" 4 | default = "10.70.90.52" 5 | } 6 | 7 | variable "proxmox_token_id" { 8 | type = string 9 | description = "proxmox server token id" 10 | } 11 | 12 | variable "proxmox_token_secret" { 13 | type = string 14 | description = "proxmox server token secret" 15 | } 16 | 17 | variable "proxmox_node" { 18 | type = string 19 | description = "target node on the proxmox server" 20 | } 21 | 22 | variable "proxmox_storage_location" { 23 | type = string 24 | description = "target resource pool on the proxmox server" 25 | default = "local-lvm" 26 | } 27 | 28 | variable "mesh_dns_local_password" { 29 | type = string 30 | description = "password of the local user for the vm" 31 | sensitive = true 32 | } 33 | 34 | variable "influx_db_token" { 35 | type = string 36 | description = "token for influx db" 37 | sensitive = true 38 | } 39 | 40 | variable "datadog_api_key" { 41 | type = string 42 | description = "API key for datadog" 43 | sensitive = true 44 | } 45 | 46 | variable "datadog_site" { 47 | type = string 48 | description = "URL for datadog" 49 | default = "us5.datadoghq.com" 50 | } 51 | 52 | variable "dns_auth_mgt_ip" { 53 | type = list(any) 54 | description = "management IPs for the authoritative dns vm(s)" 55 | } 56 | 57 | variable "dns_rec_mgt_ip" 
{ 58 | type = list(any) 59 | description = "management IPs for the recursive dns vm(s)" 60 | } 61 | 62 | variable "dns_auth_router_ip" { 63 | type = list(string) 64 | description = "ospf router IDs for the authoritative dns vm(s)" 65 | } 66 | 67 | variable "bird_network" { 68 | type = string 69 | description = "bird ospf network for dns vm(s)" 70 | default = "10.69.0.0/16" 71 | } 72 | 73 | variable "dns_rec_router_ip" { 74 | type = list(string) 75 | description = "ospf router IDs for the recursive dns vm(s)" 76 | } 77 | 78 | variable "dns_auth_internal_ip" { 79 | type = list(any) 80 | description = "internal listen IPs for the authoritative dns vm(s)" 81 | } 82 | 83 | variable "dns_rec_internal_ip" { 84 | type = list(any) 85 | description = "internal listen IPs for the recursive dns vm(s)" 86 | } 87 | 88 | variable "dns_auth_external_ip" { 89 | type = list(any) 90 | description = "external listen IPs for the authoritative dns vm(s), empty string for none" 91 | } 92 | 93 | variable "dns_rec_external_ip" { 94 | type = list(any) 95 | description = "external listen IPs for the recursive dns vm(s), empty string for none" 96 | } 97 | 98 | variable "dns_rec_outgoing_ip" { 99 | type = list(any) 100 | description = "external IPs used to resolve recursive dns queries, empty string for none" 101 | } 102 | 103 | variable "dns_mgt_network_host_identifier" { 104 | type = string 105 | description = "default network range for the vm" 106 | } 107 | 108 | variable "dns_mgt_gateway" { 109 | type = string 110 | description = "default gateway for the vm" 111 | } 112 | 113 | variable "vm_nic" { 114 | type = string 115 | description = "nic for the vms" 116 | default = "vmbr0" 117 | } 118 | 119 | variable "hostname_prefix" { 120 | type = string 121 | description = "previous dns hosts (this + index)" 122 | default = 0 123 | } 124 | 125 | variable "hostname_count_offset" { 126 | type = number 127 | description = "prefix for the VM hostnames" 128 | default = 0 129 | } 130 | 131 | variable 
"recursive_cores" { 132 | type = string 133 | description = "cpu core count" 134 | default = 2 135 | } 136 | variable "recursive_sockets" { 137 | type = string 138 | description = "socket count" 139 | default = 1 140 | } 141 | 142 | variable "recursive_memory" { 143 | type = string 144 | description = "RAM MB count" 145 | default = 2560 146 | } 147 | 148 | variable "authoritative_cores" { 149 | type = string 150 | description = "cpu core count" 151 | default = 2 152 | } 153 | 154 | variable "authoritative_sockets" { 155 | type = string 156 | description = "socket count" 157 | default = 1 158 | } 159 | 160 | variable "authoritative_memory" { 161 | type = string 162 | description = "RAM MB count" 163 | default = 2560 164 | } 165 | 166 | variable "dns_cookie_secret" { 167 | type = string 168 | description = "0x{32 hex chars} used for secret for dns cookies" 169 | sensitive = true 170 | } 171 | 172 | variable "tsig_key_k8s_prod1" { 173 | type = string 174 | description = "TSIG key for the zone updates from prod1 k8s lb" 175 | sensitive = true 176 | } 177 | 178 | variable "tsig_key_k8s_prod2" { 179 | type = string 180 | description = "TSIG key for the zone updates from prod2 k8s lb" 181 | sensitive = true 182 | } 183 | 184 | variable "tsig_key_k8s_dev3" { 185 | type = string 186 | description = "TSIG key for the zone updates from dev3 k8s lb" 187 | sensitive = true 188 | } 189 | 190 | variable "tsig_key_jon" { 191 | type = string 192 | description = "TSIG key for the jon.mesh.nycmesh.net zone" 193 | sensitive = true 194 | } 195 | 196 | variable "tsig_key_10_r630_01" { 197 | type = string 198 | description = "TSIG key for the nycmesh-10-r630-01.mesh.nycmesh.net zone" 199 | sensitive = true 200 | } 201 | 202 | variable "tsig_key_713_r640_01" { 203 | type = string 204 | description = "TSIG key for the nycmesh-713-r640-01.mesh.nycmesh.net zone" 205 | sensitive = true 206 | } 207 | 208 | variable "tsig_key_doh" { 209 | type = string 210 | description = "TSIG key for the 
doh.mesh.nycmesh.net zone" 211 | sensitive = true 212 | } 213 | 214 | variable "tsig_key_nn" { 215 | type = string 216 | description = "TSIG key for the nn.mesh.nycmesh.net zone" 217 | sensitive = true 218 | } 219 | 220 | variable "tsig_key_ha" { 221 | type = string 222 | description = "TSIG key for the ha.mesh.nycmesh.net zone" 223 | sensitive = true 224 | } 225 | 226 | variable "enable_doh" { 227 | type = string 228 | description = "Enable doh server on recursive resolver" 229 | default = "" 230 | } 231 | 232 | variable "main_auth_server_ip" { 233 | type = string 234 | description = "authoritative server IP to use for DoH certs" 235 | default = "199.170.132.47" 236 | } 237 | 238 | variable "mesh_stub_resolver" { 239 | type = string 240 | description = "resolver for mesh bound queries" 241 | } 242 | 243 | variable "bird_ospf_cost" { 244 | type = string 245 | description = "OSPF cost for only bird" 246 | default = "10" 247 | } 248 | -------------------------------------------------------------------------------- /infra/terraform/mesh_dns_servers/vars.tf: -------------------------------------------------------------------------------- 1 | variable "proxmox_node" { 2 | type = string 3 | description = "target node on the proxmox server" 4 | default = "jon" 5 | } 6 | 7 | variable "proxmox_template_image" { 8 | type = string 9 | description = "name of the template you have already setup in proxmox" 10 | default = "debian-cloud" 11 | } 12 | 13 | variable "proxmox_storage_location" { 14 | type = string 15 | description = "target resource pool on the proxmox server" 16 | default = "local-lvm" 17 | } 18 | 19 | variable "dns_local_user" { 20 | type = string 21 | description = "default user for the vm" 22 | default = "debian" 23 | } 24 | 25 | variable "mesh_dns_local_password" { 26 | type = string 27 | description = "password of the local user for the vm" 28 | sensitive = true 29 | } 30 | 31 | variable "dns_auth_mgt_ip" { 32 | type = list(any) 33 | description = "management 
IPs for the authoritative dns vm(s)" 34 | } 35 | 36 | variable "dns_rec_mgt_ip" { 37 | type = list(any) 38 | description = "management IPs for the recursive dns vm(s)" 39 | } 40 | 41 | variable "dns_auth_router_ip" { 42 | type = list(string) 43 | description = "ospf router IDs for the authoritative dns vm(s)" 44 | } 45 | 46 | variable "bird_network" { 47 | type = string 48 | description = "bird ospf network for dns vm(s)" 49 | default = "10.69.0.0/16" 50 | } 51 | 52 | variable "dns_rec_router_ip" { 53 | type = list(string) 54 | description = "ospf router IDs for the recursive dns vm(s)" 55 | } 56 | 57 | variable "dns_auth_internal_ip" { 58 | type = list(any) 59 | description = "internal listen IPs for the authoritative dns vm(s)" 60 | } 61 | 62 | variable "dns_rec_internal_ip" { 63 | type = list(any) 64 | description = "internal listen IPs for the recursive dns vm(s)" 65 | } 66 | 67 | variable "dns_auth_external_ip" { 68 | type = list(any) 69 | description = "external listen IPs for the authoritative dns vm(s), empty string for none" 70 | } 71 | 72 | variable "dns_rec_external_ip" { 73 | type = list(any) 74 | description = "external listen IPs for the recursive dns vm(s), empty string for none" 75 | } 76 | 77 | variable "dns_rec_outgoing_ip" { 78 | type = list(any) 79 | description = "external IPs used to resolve recursive dns queries, empty string for none" 80 | } 81 | 82 | variable "dns_mgt_network_host_identifier" { 83 | type = string 84 | description = "default network range for the vm" 85 | } 86 | 87 | variable "dns_mgt_gateway" { 88 | type = string 89 | description = "default gateway for the vm" 90 | } 91 | 92 | variable "dns_ssh_key_name" { 93 | type = string 94 | description = "ssh key name" 95 | } 96 | 97 | variable "influx_db_token" { 98 | type = string 99 | description = "token for influx db" 100 | sensitive = true 101 | } 102 | 103 | variable "datadog_api_key" { 104 | type = string 105 | description = "API key for datadog" 106 | sensitive = true 107 | } 
108 | 109 | variable "datadog_site" { 110 | type = string 111 | description = "URL for datadog" 112 | } 113 | 114 | variable "vm_nic" { 115 | type = string 116 | description = "nic for the vms" 117 | default = "vmbr0" 118 | } 119 | 120 | variable "hostname_prefix" { 121 | type = string 122 | description = "prefix for the VM hostnames" 123 | default = 0 124 | } 125 | 126 | variable "hostname_count_offset" { 127 | type = number 128 | description = "previous dns hosts (this + index)" 129 | default = 0 130 | } 131 | 132 | variable "recursive_cores" { 133 | type = string 134 | description = "cpu core count" 135 | default = 2 136 | } 137 | variable "recursive_sockets" { 138 | type = string 139 | description = "socket count" 140 | default = 1 141 | } 142 | 143 | variable "recursive_memory" { 144 | type = string 145 | description = "RAM MB count" 146 | default = 2560 147 | } 148 | 149 | variable "authoritative_cores" { 150 | type = string 151 | description = "cpu core count" 152 | default = 2 153 | } 154 | 155 | variable "authoritative_sockets" { 156 | type = string 157 | description = "socket count" 158 | default = 1 159 | } 160 | 161 | variable "authoritative_memory" { 162 | type = string 163 | description = "RAM MB count" 164 | default = 2560 165 | } 166 | 167 | variable "dns_cookie_secret" { 168 | type = string 169 | description = "0x{32 hex chars} used for secret for dns cookies" 170 | sensitive = true 171 | } 172 | 173 | variable "tsig_key_k8s_prod1" { 174 | type = string 175 | description = "TSIG key for the zone updates from prod1 k8s lb" 176 | sensitive = true 177 | } 178 | 179 | variable "tsig_key_k8s_prod2" { 180 | type = string 181 | description = "TSIG key for the zone updates from prod2 k8s lb" 182 | sensitive = true 183 | } 184 | 185 | variable "tsig_key_k8s_dev3" { 186 | type = string 187 | description = "TSIG key for the zone updates from dev3 k8s lb" 188 | sensitive = true 189 | } 190 | 191 | variable "tsig_key_jon" { 192 | type = string 193 | description
= "TSIG key for the jon.mesh.nycmesh.net zone" 194 | sensitive = true 195 | } 196 | 197 | variable "tsig_key_10_r630_01" { 198 | type = string 199 | description = "TSIG key for the nycmesh-10-r630-01.mesh.nycmesh.net zone" 200 | sensitive = true 201 | } 202 | 203 | variable "tsig_key_713_r640_01" { 204 | type = string 205 | description = "TSIG key for the nycmesh-713-r640-01.mesh.nycmesh.net zone" 206 | sensitive = true 207 | } 208 | 209 | variable "tsig_key_doh" { 210 | type = string 211 | description = "TSIG key for the doh.mesh.nycmesh.net zone" 212 | sensitive = true 213 | } 214 | 215 | variable "tsig_key_nn" { 216 | type = string 217 | description = "TSIG key for the nn.mesh.nycmesh.net zone" 218 | sensitive = true 219 | } 220 | 221 | variable "tsig_key_ha" { 222 | type = string 223 | description = "TSIG key for the ha.mesh.nycmesh.net zone" 224 | sensitive = true 225 | } 226 | 227 | variable "enable_doh" { 228 | type = string 229 | description = "Enable doh server on recursive resolver" 230 | default = "" 231 | } 232 | 233 | variable "main_auth_server_ip" { 234 | type = string 235 | description = "authoritative server IP to use for DoH certs" 236 | } 237 | 238 | variable "mesh_stub_resolver" { 239 | type = string 240 | description = "resolver for mesh bound queries" 241 | } 242 | 243 | variable "bird_ospf_cost" { 244 | type = string 245 | description = "OSPF cost for only bird" 246 | default = "10" 247 | } 248 | -------------------------------------------------------------------------------- /mesh.zone: -------------------------------------------------------------------------------- 1 | ; $ORIGIN mesh. 2 | $TTL 3600 3 | @ SOA nycmesh-713-dns-auth-3 hostmaster.nycmesh.net. 
( 2024120100 1d 2h 4w 1h ) 4 | @ NS nycmesh-713-dns-auth-3 5 | @ NS nycmesh-10-dns-auth-5 6 | @ A 10.10.10.11 ; IPv4 address for example.com 7 | 8 | ; Allocated domains 9 | dns A 10.10.10.10 10 | ns A 10.10.10.11 11 | nycmesh-10-dns-auth-5 A 23.158.16.23 12 | nycmesh-713-dns-auth-3 A 199.170.132.47 13 | nycmesh-713-jon-dns-auth-1 A 199.170.132.48 14 | something A 1.1.1.1 15 | unms CNAME uisp 16 | uisp A 10.70.76.21 17 | donuts A 10.70.73.29 18 | unifi A 10.70.90.158 19 | alerts A 10.70.178.21 20 | wiki A 199.170.132.45 21 | mastodon A 199.170.132.45 22 | building NS nycmesh-713-dns-auth-3 23 | building-dev NS nycmesh-713-dns-auth-3 24 | grafana A 10.70.90.82 25 | jon A 10.70.90.52 26 | nycmesh-10-r630-01 A 10.70.103.186 27 | nycmesh-10-r630-01-mgmt A 10.70.103.187 28 | nycmesh-713-r630-01 A 10.70.103.188 29 | nycmesh-713-r630-01-mgmt A 10.70.103.189 30 | devwiki A 199.170.132.46 31 | nycmesh-713-r640-01 A 10.70.90.195 32 | nycmesh-713-r640-02 A 10.70.90.196 33 | nycmesh-11 A 10.70.104.12 34 | grafana-new A 10.70.90.146 35 | rsyslog A 10.10.5.14 36 | freepbx A 10.70.90.62 37 | 38 | ; Emerson 39 | gitlab A 10.70.123.5 40 | em NS em-router.members.sn3 41 | em-router.members.sn3 A 10.70.90.174 42 | 43 | ; Zach 44 | search A 10.70.132.57 45 | docs A 10.70.132.58 46 | zrg NS nycmesh-363-exit 47 | n363 NS nycmesh-363-exit 48 | nycmesh-363-exit A 10.70.132.1 49 | 50 | ; Dan 51 | n265 A 10.69.2.65 52 | 53 | ; Daniel 54 | daniel-dns A 10.70.90.50 55 | daniel NS daniel-dns 56 | nyckml A 199.170.132.45 57 | nyckmldev A 199.170.132.46 58 | 59 | ; micro 60 | crichton A 10.70.90.86 61 | solar A 10.96.63.136 62 | 63 | ; myf, oliver 64 | petra A 10.70.90.87 65 | ipmi.petra A 10.70.90.89 66 | 67 | ; Andrew 68 | andrew-vpn-endpoint-router A 54.161.165.190 69 | andrew NS andrew-vpn-endpoint-router 70 | 71 | ; Willard 72 | zabbix A 10.70.90.40 73 | status A 164.92.117.225 74 | jenkins NS nycmesh-713-dns-auth-3 75 | 76 | ; James 77 | scan A 10.70.90.120 78 | wazuh A 10.70.90.75 79 | 
jamesprojects A 10.70.90.53 80 | jmstemp A 199.170.132.45 81 | jamesinternalprodtwo NS nycmesh-10-dns-auth-5 82 | 83 | ; airflow 84 | airflow NS nycmesh-713-dns-auth-3 85 | devairflow NS nycmesh-713-dns-auth-3 86 | 87 | ; doh 88 | doh NS nycmesh-713-dns-auth-3 89 | 90 | ; nn 91 | nn NS nycmesh-713-dns-auth-3 92 | 93 | ; Proxmox 94 | prox NS nycmesh-713-dns-auth-3 95 | 96 | ; David K 97 | emergency-dev A 10.70.90.161 98 | 99 | ; Scott145 100 | johnb-grafana-lxc A 10.70.90.42 101 | grafjohna CNAME johnb-grafana-lxc 102 | nycmesh-10-gpubuntu-01 A 10.70.100.42 103 | ospf-scilloscope-backend CNAME nycmesh-10-gpubuntu-01 104 | 105 | ; Reserved as services are made 106 | null A 10.10.10.254 107 | vpn A 10.10.10.254 108 | core A 10.10.10.254 109 | user A 10.10.10.254 110 | nms A 10.70.118.10 111 | scuttle A 10.70.73.4 112 | lte A 10.70.90.170 113 | 114 | ; The Room 115 | room A 10.69.30.65 116 | meshroom CNAME room 117 | cave CNAME room 118 | ha NS nycmesh-713-dns-auth-3 119 | ;ha.room CNAME ha 120 | octoprint.room A 10.70.147.150 121 | 122 | ; Supernode3 123 | l2tpvpn.sn3 A 199.170.132.6 124 | wgvpn.sn3 A 199.170.132.43 125 | 126 | ; Supernode10 127 | l2tpvpn.sn10 A 23.158.16.21 128 | wgvpn.sn10 A 23.158.16.28 129 | 130 | ; Supernode1 131 | ; all SN1 services decom'd Feb24 132 | 133 | ; nycmesh-375p-router1.oob.sn1 A 10.42.43.1 134 | ; nycmesh-375p-ep1.oob.sn1 A 10.42.43.2 135 | ; nycmesh-375p-routera.oob.sn1 A 10.42.43.3 136 | ; nycmesh-375p-cisco1.oob.sn1 A 10.42.43.9 137 | ; nycmesh-375p-a1s1.oob.sn1 A 10.42.43.11 138 | ; nycmesh-375p-a1s2.oob.sn1 A 10.42.43.12 139 | ; nycmesh-375p-a1s3.oob.sn1 A 10.42.43.13 140 | ; nycmesh-375p-a3.oob.sn1 A 10.42.43.14 141 | ; nycmesh-375p-a4.oob.sn1 A 10.42.43.15 142 | ; nycmesh-375p-a2a1.oob.sn1 A 10.42.43.21 143 | ; nycmesh-375p-af24-firehouse.oob.sn1 A 10.42.43.25 144 | ; nycmesh-375p-esxi1.oob.sn1 A 10.42.43.31 145 | ; nycmesh-375p-core1.oob.sn1 A 10.42.43.41 146 | ; nycmesh-375p-vpn1.oob.sn1 A 10.42.43.42 147 | ; 
nycmesh-375p-dns1.oob.sn1 A 10.42.43.43 148 | ; nycmesh-375p-border2.oob.sn1 A 10.42.43.44 149 | ; nycmesh-375p-speed1.oob.sn1 A 10.42.43.45 150 | ; nycmesh-375p-vpn2.oob.sn1 A 10.42.43.46 151 | 152 | ; nycmesh-375p-core1.v46.sn1 A 10.70.74.1 153 | 154 | ; nycmesh-375p-core1.internalpeer.sn1 A 10.70.71.129 155 | ; nycmesh-375p-core2.internalpeer.sn1 A 10.70.71.130 156 | ; nycmesh-375p-vpn1.internalpeer.sn1 A 10.70.71.131 157 | ; nycmesh-375p-vpn2.internalpeer.sn1 A 10.70.71.132 158 | ; nycmesh-375p-dns1.internalpeer.sn1 A 10.70.71.133 159 | ; nycmesh-375p-dns2.sn1 A 10.70.76.99 160 | ; nycmesh-375p-svc1.internalpeer.sn1 A 10.70.71.135 161 | ; nycmesh-375p-svc2.internalpeer.sn1 A 10.70.71.136 162 | ; nycmesh-375p-maint1.internalpeer.sn1 A 10.70.71.158 163 | 164 | ; nycmesh-375p-core2.ipsec.sn1 A 10.70.72.1 165 | ; nycmesh-n3-sxtsq-n3.ipsec.sn1 A 10.70.72.130 166 | 167 | ; nycmesh-375p-core1.lo.sn1 A 10.70.254.1 168 | ; nycmesh-375p-vpn1.lo.sn1 A 10.70.254.7 169 | ; nycmesh-375p-dns1.lo.sn1 A 10.70.254.9 170 | 171 | ; nycmesh-375p-routera.public.sn1 A 199.167.59.1 172 | ; nycmesh-375p-oob-router1.public.sn1 A 199.167.59.2 173 | ; nycmesh-375p-core1.public.sn1 A 199.167.59.3 174 | ; nycmesh-375p-vpn1.public.sn1 A 199.167.59.4 175 | ; nycmesh-375p-border2.public.sn1 A 199.167.59.5 176 | ; nycmesh-375p-vpn2.public.sn1 A 199.167.59.6 177 | ; nycmesh-375p-dns1.public.sn1 A 199.167.59.10 178 | 179 | ; gnycmesh-375p-core1.intpeer.sn1 A 10.70.252.1 180 | ; gnycmesh-375p-routera.intpeer.sn1 A 10.70.252.3 181 | ; gnycmesh-375p-vpn1.intpeer.sn1 A 10.70.252.7 182 | ; gnycmesh-375p-vpn2.intpeer.sn1 A 10.70.252.8 183 | ; gnycmesh-375p-dns1.intpeer.sn1 A 10.70.252.9 184 | 185 | 186 | ; records for rDNS 10.70.253.0/24 (./31) 187 | sn3.af24-ph A 10.70.253.0 188 | ph.af24-sn3 A 10.70.253.1 189 | vernon.east A 10.70.253.2 190 | jefferson.lbe A 10.70.253.3 191 | sn3.af60xr A 10.70.253.4 192 | vernon.af60xr A 10.70.253.5 193 | ph.northwest A 10.70.253.6 194 | spencer.lbelr A 10.70.253.7 
195 | ph.southeast A 10.70.253.8 196 | brownsville.lbelr A 10.70.253.9 197 | ph.af60lr A 10.70.253.10 198 | nostrand.af60lr A 10.70.253.11 199 | vernon.sector A 10.70.253.12 200 | nn265.lbe2 A 10.70.253.13 201 | sn3.vpn-andrew A 10.70.253.14 202 | vpn-andrew.sn3 A 10.70.253.15 203 | sn3.vpn-nn383 A 10.70.253.16 204 | vpn-nn383.sn3 A 10.70.253.17 205 | rivington.gbep A 10.70.253.18 206 | 100ava.gbep A 10.70.253.19 207 | twobridge.lbe A 10.70.253.20 208 | sn1.prism2 A 10.70.253.21 209 | saratoga.af24 A 10.70.253.22 210 | softsurplus.af24 A 10.70.253.23 211 | saratoga.sector A 10.70.253.24 212 | browsville.lbelr A 10.70.253.25 213 | vernon.af60lr-1 A 10.70.253.26 214 | saratoga.af60lr A 10.70.253.27 215 | sn3.vpn-nn136 A 10.70.253.28 216 | svpn-nn136.sn3 A 10.70.253.29 217 | vernon.af60lr.ph A 10.70.253.30 218 | ph.af60lr.vernon A 10.70.253.31 219 | Vernon.sector A 10.70.253.32 220 | 1779.lbe.Vernon A 10.70.253.33 221 | saratoga.lbe A 10.70.253.34 222 | cypress.lbe A 10.70.253.35 223 | Vernon.sector A 10.70.253.36 224 | nn436.lbe A 10.70.253.37 225 | sn10.fiber A 10.70.253.38 226 | grand34.fiber A 10.70.253.39 227 | ph.sector A 10.70.253.40 228 | nn3607.lbelr A 10.70.253.41 229 | sn10.vpn A 10.70.253.42 230 | sn3.vpn A 10.70.253.43 231 | rivington.lbe A 10.70.253.44 232 | guernsey.lbe A 10.70.253.45 233 | sn10.vpn1.sn3 A 10.70.253.46 234 | sn3.vpn1.sn10 A 10.70.253.47 235 | Robj135.WG A 10.70.253.48 236 | sn3.Robj135.WG A 10.70.253.49 237 | 100ava.nsl.731 A 10.70.253.50 238 | 7thstreet.nsl.2274 A 10.70.253.51 239 | sn3.af24.2282 A 10.70.253.52 240 | degraw.af24.sn3 A 10.70.253.53 241 | nn219.lbe.ph A 10.70.253.54 242 | ph.sector.nn219 A 10.70.253.55 243 | vlado.lbelr.ph-sector A 10.70.253.56 244 | ph-sector.lbelr.vlado A 10.70.253.57 245 | Thames.lbe A 10.70.253.58 246 | Softsurplus.sector A 10.70.253.59 247 | nn4126.Lbe A 10.70.253.60 248 | Vernon.sector A 10.70.253.61 249 | FLo.lbe2 A 10.70.253.62 250 | Vernon.sector A 10.70.253.63 251 | 252 | 253 | ; records for 
rDNS 10.70.251.0/24 (./30) 254 | grand34.af60lr.prallel A 10.70.251.1 255 | parallel.af60lr.grand34 A 10.70.251.2 256 | grand34.lhg60.rivington A 10.70.251.5 257 | rivington.lhg60.grand34 A 10.70.251.6 258 | grand34.af60lr.navyyard A 10.70.251.9 259 | navyyard.af60lr.grand34 A 10.70.251.10 260 | grand34.eh8010.vernon A 10.70.251.13 261 | vernon.eh8010.grand34 A 10.70.251.14 262 | sn1.af24.grand32 A 10.70.251.17 263 | grand32.af24.sn1 A 10.70.251.18 264 | grand34.lblr.guernsey A 10.70.251.45 265 | guernsey.lblr.grand34 A 10.70.251.46 266 | 267 | ; record for rDNS others 268 | sn10edge2-sn10core1.internal A 10.70.102.1 269 | sn10core1-sn10edge2.internal A 10.70.102.2 270 | nycmesh-10-nat A 10.70.102.6 271 | nycmesh-sn10-edge1-temp A 10.70.102.132 272 | -------------------------------------------------------------------------------- /infra/ansible/roles/dns_telegraf/templates/telegraf.conf.j2: -------------------------------------------------------------------------------- 1 | # Telegraf Configuration 2 | # 3 | # Telegraf is entirely plugin driven. All metrics are gathered from the 4 | # declared inputs, and sent to the declared outputs. 5 | # 6 | # Plugins must be declared in here to be active. 7 | # To deactivate a plugin, comment out the name and any variables. 8 | # 9 | # Use 'telegraf -config telegraf.conf -test' to see what metrics a config 10 | # file would generate. 11 | # 12 | # Environment variables can be used anywhere in this config file, simply surround 13 | # them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), 14 | # for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) 15 | 16 | 17 | # Global tags can be specified here in key="value" format. 
18 | [global_tags] 19 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1 20 | # rack = "1a" 21 | ## Environment variables can be used as tags, and throughout the config file 22 | # user = "$USER" 23 | 24 | # Configuration for telegraf agent 25 | [agent] 26 | ## Default data collection interval for all inputs 27 | interval = "30s" 28 | ## Rounds collection interval to 'interval' 29 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc. 30 | round_interval = true 31 | 32 | ## Telegraf will send metrics to outputs in batches of at most 33 | ## metric_batch_size metrics. 34 | ## This controls the size of writes that Telegraf sends to output plugins. 35 | metric_batch_size = 1000 36 | 37 | ## Maximum number of unwritten metrics per output. Increasing this value 38 | ## allows for longer periods of output downtime without dropping metrics at the 39 | ## cost of higher maximum memory usage. 40 | metric_buffer_limit = 10000 41 | 42 | ## Collection jitter is used to jitter the collection by a random amount. 43 | ## Each plugin will sleep for a random time within jitter before collecting. 44 | ## This can be used to avoid many plugins querying things like sysfs at the 45 | ## same time, which can have a measurable effect on the system. 46 | collection_jitter = "5s" 47 | 48 | ## Collection offset is used to shift the collection by the given amount. 49 | ## This can be used to avoid many plugins querying constraint devices 50 | ## at the same time by manually scheduling them in time. 51 | # collection_offset = "0s" 52 | 53 | ## Default flushing interval for all outputs. Maximum flush_interval will be 54 | ## flush_interval + flush_jitter 55 | flush_interval = "10s" 56 | ## Jitter the flush interval by a random amount. This is primarily to avoid 57 | ## large write spikes for users running a large number of telegraf instances.
58 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s 59 | flush_jitter = "5s" 60 | 61 | ## Collected metrics are rounded to the precision specified. Precision is 62 | ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). 63 | ## Valid time units are "ns", "us" (or "µs"), "ms", "s". 64 | ## 65 | ## By default or when set to "0s", precision will be set to the same 66 | ## timestamp order as the collection interval, with the maximum being 1s: 67 | ## ie, when interval = "10s", precision will be "1s" 68 | ## when interval = "250ms", precision will be "1ms" 69 | ## 70 | ## Precision will NOT be used for service inputs. It is up to each individual 71 | ## service input to set the timestamp at the appropriate precision. 72 | precision = "0s" 73 | 74 | ## Log at debug level. 75 | # debug = false 76 | ## Log only error level messages. 77 | # quiet = false 78 | 79 | ## Log target controls the destination for logs and can be one of "file", 80 | ## "stderr" or, on Windows, "eventlog". When set to "file", the output file 81 | ## is determined by the "logfile" setting. 82 | # logtarget = "file" 83 | 84 | ## Name of the file to be logged to when using the "file" logtarget. If set to 85 | ## the empty string then logs are written to stderr. 86 | # logfile = "" 87 | 88 | ## The logfile will be rotated after the time interval specified. When set 89 | ## to 0 no time based rotation is performed. Logs are rotated only when 90 | ## written to, if there is no log activity rotation may be delayed. 91 | # logfile_rotation_interval = "0h" 92 | 93 | ## The logfile will be rotated when it becomes larger than the specified 94 | ## size. When set to 0 no size based rotation is performed. 95 | # logfile_rotation_max_size = "0MB" 96 | 97 | ## Maximum number of rotated archives to keep, any older logs are deleted. 98 | ## If set to -1, no archives are removed. 
99 | # logfile_rotation_max_archives = 5 100 | 101 | ## Pick a timezone to use when logging or type 'local' for local time. 102 | ## Example: America/Chicago 103 | # log_with_timezone = "" 104 | 105 | ## Override default hostname, if empty use os.Hostname() 106 | hostname = "" 107 | ## If set to true, do no set the "host" tag in the telegraf agent. 108 | omit_hostname = false 109 | 110 | ## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which 111 | ## translates by calling external programs snmptranslate and snmptable, 112 | ## or "gosmi" which translates using the built-in gosmi library. 113 | # snmp_translator = "netsnmp" 114 | 115 | ## Name of the file to load the state of plugins from and store the state to. 116 | ## If uncommented and not empty, this file will be used to save the state of 117 | ## stateful plugins on termination of Telegraf. If the file exists on start, 118 | ## the state in the file will be restored for the plugins. 119 | # statefile = "" 120 | 121 | ## Flag to skip running processors after aggregators 122 | ## By default, processors are run a second time after aggregators. Changing 123 | ## this setting to true will skip the second run of processors. 
124 | # skip_processors_after_aggregators = false 125 | 126 | ############################################################################### 127 | # INPUTS # 128 | ############################################################################### 129 | [[inputs.cpu]] 130 | percpu = true 131 | [[inputs.disk]] 132 | [[inputs.diskio]] 133 | [[inputs.mem]] 134 | [[inputs.net]] 135 | [[inputs.system]] 136 | [[inputs.swap]] 137 | [[inputs.netstat]] 138 | [[inputs.processes]] 139 | [[inputs.kernel]] 140 | 141 | {% if telegraf_knot != "" %} 142 | [[inputs.exec]] 143 | command = "sh /usr/bin/telegraf-knot.sh -s /run/knot/knot.sock" 144 | data_format = "influx" 145 | 146 | interval = "30s" 147 | {% endif %} 148 | 149 | {% if telegraf_kresd != "" %} 150 | [[inputs.prometheus]] 151 | urls = ["http://127.0.0.1:9100/metrics"] 152 | metric_version = 2 153 | 154 | interval = "30s" 155 | {% endif %} 156 | 157 | ############################################################################### 158 | # OUTPUTS # 159 | ############################################################################### 160 | # Configuration for sending metrics to InfluxDB 2.0 161 | [[outputs.influxdb_v2]] 162 | ## The URLs of the InfluxDB cluster nodes. 163 | ## 164 | ## Multiple URLs can be specified for a single cluster, only ONE of the 165 | ## urls will be written to each interval. 166 | ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] 167 | urls = ["http://10.70.90.147:30086"] 168 | 169 | ## Local address to bind when connecting to the server 170 | ## If empty or not set, the local address is automatically chosen. 171 | # local_address = "" 172 | 173 | ## Token for authentication. 174 | token = "{{ INFLUX_DB_TOKEN }}" 175 | 176 | ## Organization is the name of the organization you wish to write to. 177 | organization = "nycmesh" 178 | 179 | ## Destination bucket to write into. 180 | bucket = "dns_monitoring" 181 | 182 | ## The value of this tag will be used to determine the bucket. 
If this 183 | ## tag is not set the 'bucket' option is used as the default. 184 | # bucket_tag = "" 185 | 186 | ## If true, the bucket tag will not be added to the metric. 187 | # exclude_bucket_tag = false 188 | 189 | ## Timeout for HTTP messages. 190 | # timeout = "5s" 191 | 192 | ## Additional HTTP headers 193 | # http_headers = {"X-Special-Header" = "Special-Value"} 194 | 195 | ## HTTP Proxy override, if unset values the standard proxy environment 196 | ## variables are consulted to determine which proxy, if any, should be used. 197 | # http_proxy = "http://corporate.proxy:3128" 198 | 199 | ## HTTP User-Agent 200 | # user_agent = "telegraf" 201 | 202 | ## Content-Encoding for write request body, can be set to "gzip" to 203 | ## compress body or "identity" to apply no encoding. 204 | # content_encoding = "gzip" 205 | 206 | ## Enable or disable uint support for writing uints influxdb 2.0. 207 | # influx_uint_support = false 208 | 209 | ## When true, Telegraf will omit the timestamp on data to allow InfluxDB 210 | ## to set the timestamp of the data during ingestion. This is generally NOT 211 | ## what you want as it can lead to data points captured at different times 212 | ## getting omitted due to similar data. 213 | # influx_omit_timestamp = false 214 | 215 | ## HTTP/2 Timeouts 216 | ## The following values control the HTTP/2 client's timeouts. These settings 217 | ## are generally not required unless a user is seeing issues with client 218 | ## disconnects. If a user does see issues, then it is suggested to set these 219 | ## values to "15s" for ping timeout and "30s" for read idle timeout and 220 | ## retry. 221 | ## 222 | ## Note that the timer for read_idle_timeout begins at the end of the last 223 | ## successful write and not at the beginning of the next write. 224 | # ping_timeout = "0s" 225 | # read_idle_timeout = "0s" 226 | 227 | ## Optional TLS Config for use on HTTP connections. 
228 | # tls_ca = "/etc/telegraf/ca.pem" 229 | # tls_cert = "/etc/telegraf/cert.pem" 230 | # tls_key = "/etc/telegraf/key.pem" 231 | ## Use TLS but skip chain & host verification 232 | # insecure_skip_verify = false 233 | 234 | [[outputs.datadog]] 235 | apikey = "{{ DATADOG_API_KEY }}" 236 | namepass = ["cookies*", "prometheus*", "rrl*", "server*", "stats*"] 237 | url = "https://{{ DATADOG_SITE }}/api/v1/series" 238 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==11.6.0 \ 2 | --hash=sha256:5b9c19d6a1080011c14c821bc7e6f8fd5b2a392219cbf2ced9be05e6d447d8cd 3 | ansible-core==2.18.6 \ 4 | --hash=sha256:12a34749a7b20f0f1536bd3e3b2e137341867e4642e351273e96647161f595c0 5 | cffi==2.0.0 \ 6 | --hash=sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb \ 7 | --hash=sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b \ 8 | --hash=sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f \ 9 | --hash=sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9 \ 10 | --hash=sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44 \ 11 | --hash=sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2 \ 12 | --hash=sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c \ 13 | --hash=sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75 \ 14 | --hash=sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65 \ 15 | --hash=sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e \ 16 | --hash=sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a \ 17 | --hash=sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e \ 18 | --hash=sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25 \ 19 | 
--hash=sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a \ 20 | --hash=sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe \ 21 | --hash=sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b \ 22 | --hash=sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91 \ 23 | --hash=sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592 \ 24 | --hash=sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187 \ 25 | --hash=sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c \ 26 | --hash=sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1 \ 27 | --hash=sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94 \ 28 | --hash=sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba \ 29 | --hash=sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb \ 30 | --hash=sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165 \ 31 | --hash=sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529 \ 32 | --hash=sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca \ 33 | --hash=sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c \ 34 | --hash=sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6 \ 35 | --hash=sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c \ 36 | --hash=sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0 \ 37 | --hash=sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743 \ 38 | --hash=sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63 \ 39 | --hash=sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5 \ 40 | --hash=sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5 \ 41 | --hash=sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4 \ 42 | 
--hash=sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d \ 43 | --hash=sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b \ 44 | --hash=sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93 \ 45 | --hash=sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205 \ 46 | --hash=sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27 \ 47 | --hash=sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512 \ 48 | --hash=sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d \ 49 | --hash=sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c \ 50 | --hash=sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037 \ 51 | --hash=sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26 \ 52 | --hash=sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322 \ 53 | --hash=sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb \ 54 | --hash=sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c \ 55 | --hash=sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8 \ 56 | --hash=sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4 \ 57 | --hash=sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414 \ 58 | --hash=sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9 \ 59 | --hash=sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664 \ 60 | --hash=sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9 \ 61 | --hash=sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775 \ 62 | --hash=sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739 \ 63 | --hash=sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc \ 64 | --hash=sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062 \ 65 | 
--hash=sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe \ 66 | --hash=sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9 \ 67 | --hash=sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92 \ 68 | --hash=sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5 \ 69 | --hash=sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13 \ 70 | --hash=sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d \ 71 | --hash=sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26 \ 72 | --hash=sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f \ 73 | --hash=sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495 \ 74 | --hash=sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b \ 75 | --hash=sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6 \ 76 | --hash=sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c \ 77 | --hash=sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef \ 78 | --hash=sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5 \ 79 | --hash=sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18 \ 80 | --hash=sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad \ 81 | --hash=sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3 \ 82 | --hash=sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7 \ 83 | --hash=sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5 \ 84 | --hash=sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534 \ 85 | --hash=sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49 \ 86 | --hash=sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2 \ 87 | --hash=sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5 \ 88 | 
--hash=sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453 \ 89 | --hash=sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf 90 | cryptography==46.0.1 \ 91 | --hash=sha256:0a17377fa52563d730248ba1f68185461fff36e8bc75d8787a7dd2e20a802b7a \ 92 | --hash=sha256:0ca4be2af48c24df689a150d9cd37404f689e2968e247b6b8ff09bff5bcd786f \ 93 | --hash=sha256:0d1922d9280e08cde90b518a10cd66831f632960a8d08cb3418922d83fce6f12 \ 94 | --hash=sha256:0dfb7c88d4462a0cfdd0d87a3c245a7bc3feb59de101f6ff88194f740f72eda6 \ 95 | --hash=sha256:0ff483716be32690c14636e54a1f6e2e1b7bf8e22ca50b989f88fa1b2d287080 \ 96 | --hash=sha256:13e67c4d3fb8b6bc4ef778a7ccdd8df4cd15b4bcc18f4239c8440891a11245cc \ 97 | --hash=sha256:15b5fd9358803b0d1cc42505a18d8bca81dabb35b5cfbfea1505092e13a9d96d \ 98 | --hash=sha256:1cd6d50c1a8b79af1a6f703709d8973845f677c8e97b1268f5ff323d38ce8475 \ 99 | --hash=sha256:2dd339ba3345b908fa3141ddba4025568fa6fd398eabce3ef72a29ac2d73ad75 \ 100 | --hash=sha256:341fb7a26bc9d6093c1b124b9f13acc283d2d51da440b98b55ab3f79f2522ead \ 101 | --hash=sha256:34f04b7311174469ab3ac2647469743720f8b6c8b046f238e5cb27905695eb2a \ 102 | --hash=sha256:41c281a74df173876da1dc9a9b6953d387f06e3d3ed9284e3baae3ab3f40883a \ 103 | --hash=sha256:449ef2b321bec7d97ef2c944173275ebdab78f3abdd005400cc409e27cd159ab \ 104 | --hash=sha256:45f790934ac1018adeba46a0f7289b2b8fe76ba774a88c7f1922213a56c98bc1 \ 105 | --hash=sha256:48948940d0ae00483e85e9154bb42997d0b77c21e43a77b7773c8c80de532ac5 \ 106 | --hash=sha256:4c49eda9a23019e11d32a0eb51a27b3e7ddedde91e099c0ac6373e3aacc0d2ee \ 107 | --hash=sha256:504e464944f2c003a0785b81668fe23c06f3b037e9cb9f68a7c672246319f277 \ 108 | --hash=sha256:534b96c0831855e29fc3b069b085fd185aa5353033631a585d5cd4dd5d40d657 \ 109 | --hash=sha256:6ef1488967e729948d424d09c94753d0167ce59afba8d0f6c07a22b629c557b2 \ 110 | --hash=sha256:7176a5ab56fac98d706921f6416a05e5aff7df0e4b91516f450f8627cda22af3 \ 111 | 
--hash=sha256:7411c910fb2a412053cf33cfad0153ee20d27e256c6c3f14d7d7d1d9fec59fd5 \ 112 | --hash=sha256:757af4f6341ce7a1e47c326ca2a81f41d236070217e5fbbad61bbfe299d55d28 \ 113 | --hash=sha256:7823bc7cdf0b747ecfb096d004cc41573c2f5c7e3a29861603a2871b43d3ef32 \ 114 | --hash=sha256:7fab1187b6c6b2f11a326f33b036f7168f5b996aedd0c059f9738915e4e8f53a \ 115 | --hash=sha256:84ef1f145de5aee82ea2447224dc23f065ff4cc5791bb3b506615957a6ba8128 \ 116 | --hash=sha256:92e8cfe8bd7dd86eac0a677499894862cd5cc2fd74de917daa881d00871ac8e7 \ 117 | --hash=sha256:9394c7d5a7565ac5f7d9ba38b2617448eba384d7b107b262d63890079fad77ca \ 118 | --hash=sha256:9495d78f52c804b5ec8878b5b8c7873aa8e63db9cd9ee387ff2db3fffe4df784 \ 119 | --hash=sha256:9873bf7c1f2a6330bdfe8621e7ce64b725784f9f0c3a6a55c3047af5849f920e \ 120 | --hash=sha256:9babb7818fdd71394e576cf26c5452df77a355eac1a27ddfa24096665a27f8fd \ 121 | --hash=sha256:9e8776dac9e660c22241b6587fae51a67b4b0147daa4d176b172c3ff768ad736 \ 122 | --hash=sha256:9ed64e5083fa806709e74fc5ea067dfef9090e5b7a2320a49be3c9df3583a2d8 \ 123 | --hash=sha256:9f2c4cc63be3ef43c0221861177cee5d14b505cd4d4599a89e2cd273c4d3542a \ 124 | --hash=sha256:9f40642a140c0c8649987027867242b801486865277cbabc8c6059ddef16dc8b \ 125 | --hash=sha256:af84e8e99f1a82cea149e253014ea9dc89f75b82c87bb6c7242203186f465129 \ 126 | --hash=sha256:b9c79af2c3058430d911ff1a5b2b96bbfe8da47d5ed961639ce4681886614e70 \ 127 | --hash=sha256:c52fded6383f7e20eaf70a60aeddd796b3677c3ad2922c801be330db62778e05 \ 128 | --hash=sha256:cbb8e769d4cac884bb28e3ff620ef1001b75588a5c83c9c9f1fdc9afbe7f29b0 \ 129 | --hash=sha256:d84c40bdb8674c29fa192373498b6cb1e84f882889d21a471b45d1f868d8d44b \ 130 | --hash=sha256:db5597a4c7353b2e5fb05a8e6cb74b56a4658a2b7bf3cb6b1821ae7e7fd6eaa0 \ 131 | --hash=sha256:e22801b61613ebdebf7deb18b507919e107547a1d39a3b57f5f855032dd7cfb8 \ 132 | --hash=sha256:e34da95e29daf8a71cb2841fd55df0511539a6cdf33e6f77c1e95e44006b9b46 \ 133 | --hash=sha256:e46710a240a41d594953012213ea8ca398cd2448fbc5d0f1be8160b5511104a0 \ 134 
| --hash=sha256:e94eb5fa32a8a9f9bf991f424f002913e3dd7c699ef552db9b14ba6a76a6313b \ 135 | --hash=sha256:ec13b7105117dbc9afd023300fb9954d72ca855c274fe563e72428ece10191c0 \ 136 | --hash=sha256:ed570874e88f213437f5cf758f9ef26cbfc3f336d889b1e592ee11283bb8d1c7 \ 137 | --hash=sha256:ed957044e368ed295257ae3d212b95456bd9756df490e1ac4538857f67531fcc \ 138 | --hash=sha256:ef648d2c690703501714588b2ba640facd50fd16548133b11b2859e8655a69da \ 139 | --hash=sha256:efc9e51c3e595267ff84adf56e9b357db89ab2279d7e375ffcaf8f678606f3d9 \ 140 | --hash=sha256:f736ab8036796f5a119ff8211deda416f8c15ce03776db704a7a4e17381cb2ef \ 141 | --hash=sha256:f7a24ea78de345cfa7f6a8d3bde8b242c7fac27f2bd78fa23474ca38dfaeeab9 \ 142 | --hash=sha256:f7de12fa0eee6234de9a9ce0ffcfa6ce97361db7a50b09b65c63ac58e5f22fc7 \ 143 | --hash=sha256:f9b55038b5c6c47559aa33626d8ecd092f354e23de3c6975e4bb205df128a2a0 \ 144 | --hash=sha256:fd4b5e2ee4e60425711ec65c33add4e7a626adef79d66f62ba0acfd493af282d 145 | Jinja2==3.1.6 \ 146 | --hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67 147 | MarkupSafe==3.0.2 \ 148 | --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ 149 | --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ 150 | --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 151 | packaging==25.0 \ 152 | --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 153 | passlib==1.7.4 \ 154 | --hash=sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1 155 | pycparser==2.22 \ 156 | --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc 157 | PyYAML==6.0.2 \ 158 | --hash=sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5 \ 159 | --hash=sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476 \ 160 | --hash=sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85 161 | resolvelib==1.0.1 \ 162 | 
--hash=sha256:f80de38ae744bcf4e918e27a681a5c6cb63a08d9a926c0989c0730bcdd089049 \ 163 | --hash=sha256:d2da45d1a8dfee81bdd591647783e340ef3bcb104b54c383f70d422ef5cc7dbf 164 | -------------------------------------------------------------------------------- /sld/records.nycmesh.net.tf: -------------------------------------------------------------------------------- 1 | # Main website 2 | resource "namedotcom_record" "record__983532" { 3 | domain_name = "nycmesh.net" 4 | host = "" 5 | record_type = "A" 6 | answer = "104.198.14.52" 7 | } 8 | 9 | # Main website 10 | resource "namedotcom_record" "record_www_983538" { 11 | domain_name = "nycmesh.net" 12 | host = "www" 13 | record_type = "CNAME" 14 | answer = "clever-shannon-d43dce.netlify.com" 15 | } 16 | 17 | # Future subdomain for the wiki 18 | resource "namedotcom_record" "record_wiki_1031824" { 19 | domain_name = "nycmesh.net" 20 | host = "wiki" 21 | record_type = "CNAME" 22 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 23 | } 24 | 25 | # SPF (email) 26 | resource "namedotcom_record" "record__983545" { 27 | domain_name = "nycmesh.net" 28 | host = "" 29 | record_type = "TXT" 30 | answer = "v=spf1 include:mailgun.org include:servers.mcsv.net ~all" 31 | } 32 | 33 | # DMARC (email) 34 | resource "namedotcom_record" "record__dmarc_3745600" { 35 | domain_name = "nycmesh.net" 36 | host = "_dmarc" 37 | record_type = "TXT" 38 | answer = "v=DMARC1; p=none" 39 | } 40 | 41 | # DKIM (email) 42 | resource "namedotcom_record" "record_k1_domainkey_3735562" { 43 | domain_name = "nycmesh.net" 44 | host = "k1._domainkey" 45 | record_type = "CNAME" 46 | answer = "dkim.mcsv.net" 47 | } 48 | 49 | # MX record for email 50 | resource "namedotcom_record" "record__983546" { 51 | domain_name = "nycmesh.net" 52 | host = "" 53 | record_type = "MX" 54 | answer = "mxa.mailgun.org" 55 | } 56 | 57 | # MX record for email 58 | resource "namedotcom_record" "record__983547" { 59 | domain_name = "nycmesh.net" 60 | host = "" 61 | record_type = "MX" 62 | 
answer = "mxb.mailgun.org" 63 | } 64 | 65 | # Site verification for mailgun. Only modify if you know what you're doing. 66 | resource "namedotcom_record" "record_pic_domainkey_1171425" { 67 | domain_name = "nycmesh.net" 68 | host = "pic._domainkey" 69 | record_type = "TXT" 70 | answer = "k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDSJLcgGjVDfFSpXdVnaz0DdvJeRj7yhcuJjXRUV85TeEOCbNgDcVQXrVJeC/J0z8iiwJAl9gDEf8L729r54VJ/y8ml+xxjIp3hDBIm0Pg9TiTVGO9kif9RlW2unIrGKw2CrE7xM7vZcpw2FQt3fJwZtZ8zBOn68sIU9stR9MUG+QIDAQAB" 71 | } 72 | 73 | # Email subdomain 74 | resource "namedotcom_record" "record_email_1171426" { 75 | domain_name = "nycmesh.net" 76 | host = "email" 77 | record_type = "CNAME" 78 | answer = "mailgun.org" 79 | } 80 | 81 | resource "namedotcom_record" "record__now_3070265" { 82 | domain_name = "nycmesh.net" 83 | host = "_now" 84 | record_type = "TXT" 85 | answer = "f12798020e735de0ae0fac869c386d7e676ac3b828953712b0f28bd718848c14" 86 | } 87 | 88 | # Docs site 89 | resource "namedotcom_record" "record_docs_3526857" { 90 | domain_name = "nycmesh.net" 91 | host = "docs" 92 | record_type = "CNAME" 93 | answer = "quirky-edison-0960a5.netlify.com" 94 | } 95 | 96 | # OS Ticket (IPv4) 97 | resource "namedotcom_record" "record_support_3588805" { 98 | domain_name = "nycmesh.net" 99 | host = "support" 100 | record_type = "A" 101 | answer = "165.227.70.230" 102 | } 103 | 104 | # OS Ticket (IPv6) 105 | resource "namedotcom_record" "record_support_3588806" { 106 | domain_name = "nycmesh.net" 107 | host = "support" 108 | record_type = "AAAA" 109 | answer = "2604:a880:800:10::9f0:3001" 110 | } 111 | 112 | # Dev OS Ticket 113 | resource "namedotcom_record" "record_devsupport_4727325" { 114 | domain_name = "nycmesh.net" 115 | host = "devsupport" 116 | record_type = "A" 117 | answer = "157.245.9.130" 118 | } 119 | 120 | # rDNS 121 | resource "namedotcom_record" "record_ipv4_3588972" { 122 | domain_name = "nycmesh.net" 123 | host = "ipv4" 124 | record_type = "NS" 125 | answer = 
"ns-518.awsdns-00.net" 126 | } 127 | 128 | # rDNS 129 | resource "namedotcom_record" "record_ipv4_3588978" { 130 | domain_name = "nycmesh.net" 131 | host = "ipv4" 132 | record_type = "NS" 133 | answer = "ns-1709.awsdns-21.co.uk" 134 | } 135 | 136 | # rDNS 137 | resource "namedotcom_record" "record_ipv4_3588980" { 138 | domain_name = "nycmesh.net" 139 | host = "ipv4" 140 | record_type = "NS" 141 | answer = "ns-432.awsdns-54.com" 142 | } 143 | 144 | # rDNS 145 | resource "namedotcom_record" "record_ipv4_3588982" { 146 | domain_name = "nycmesh.net" 147 | host = "ipv4" 148 | record_type = "NS" 149 | answer = "ns-1346.awsdns-40.org" 150 | } 151 | 152 | resource "namedotcom_record" "record_donate2_3618629" { 153 | domain_name = "nycmesh.net" 154 | host = "donate2" 155 | record_type = "A" 156 | answer = "165.227.181.4" 157 | } 158 | 159 | resource "namedotcom_record" "record_unifi_3862748" { 160 | domain_name = "nycmesh.net" 161 | host = "unifi" 162 | record_type = "A" 163 | answer = "10.70.90.158" 164 | } 165 | 166 | # NS record for the authoritative servers for mesh.nycmesh.net at SN3 + SN10 167 | # nycmesh-713-dns-auth-3 168 | # nycmesh-10-dns-auth-6 169 | resource "namedotcom_record" "mesh_ns_nycmesh-713-dns-auth-3" { 170 | domain_name = "nycmesh.net" 171 | host = "mesh" 172 | record_type = "NS" 173 | answer = "nycmesh-713-dns-auth-3.nycmesh.net" 174 | } 175 | 176 | # Authoritative DNS server for the mesh.nycmesh.net zone at SN3 177 | resource "namedotcom_record" "nycmesh-713-dns-auth-3" { 178 | domain_name = "nycmesh.net" 179 | host = "nycmesh-713-dns-auth-3" 180 | record_type = "A" 181 | answer = "199.170.132.47" 182 | } 183 | 184 | # Authoritative DNS server for the mesh.nycmesh.net zone at SN10 185 | resource "namedotcom_record" "nycmesh-10-dns-auth-6" { 186 | domain_name = "nycmesh.net" 187 | host = "nycmesh-10-dns-auth-6" 188 | record_type = "A" 189 | answer = "199.170.132.47" 190 | } 191 | 192 | # NS record for the authoritative servers for mesh.nycmesh.net at 
SN10 + SN3 193 | # nycmesh-713-dns-auth-4 194 | # nycmesh-10-dns-auth-5 195 | resource "namedotcom_record" "mesh_ns_nycmesh-10-dns-auth-5" { 196 | domain_name = "nycmesh.net" 197 | host = "mesh" 198 | record_type = "NS" 199 | answer = "nycmesh-10-dns-auth-5.nycmesh.net" 200 | } 201 | 202 | # Authoritative DNS server for the mesh.nycmesh.net zone at SN3 203 | resource "namedotcom_record" "nycmesh-713-dns-auth-4" { 204 | domain_name = "nycmesh.net" 205 | host = "nycmesh-713-dns-auth-4" 206 | record_type = "A" 207 | answer = "23.158.16.23" 208 | } 209 | 210 | # Authoritative DNS server for the mesh.nycmesh.net zone at SN10 211 | resource "namedotcom_record" "nycmesh-10-dns-auth-5" { 212 | domain_name = "nycmesh.net" 213 | host = "nycmesh-10-dns-auth-5" 214 | record_type = "A" 215 | answer = "23.158.16.23" 216 | } 217 | 218 | # Slack redirect 219 | # https://github.com/nycmeshnet/slack-redirect 220 | # https://slack.nycmesh.net to github pages 1/4 221 | resource "namedotcom_record" "slack_108" { 222 | domain_name = "nycmesh.net" 223 | host = "slack" 224 | record_type = "A" 225 | answer = "185.199.108.153" 226 | } 227 | 228 | # https://slack.nycmesh.net to github pages 2/4 229 | resource "namedotcom_record" "slack_109" { 230 | domain_name = "nycmesh.net" 231 | host = "slack" 232 | record_type = "A" 233 | answer = "185.199.109.153" 234 | } 235 | 236 | # https://slack.nycmesh.net to github pages 3/4 237 | resource "namedotcom_record" "slack_110" { 238 | domain_name = "nycmesh.net" 239 | host = "slack" 240 | record_type = "A" 241 | answer = "185.199.110.153" 242 | } 243 | 244 | # https://slack.nycmesh.net to github pages 4/4 245 | resource "namedotcom_record" "slack_111" { 246 | domain_name = "nycmesh.net" 247 | host = "slack" 248 | record_type = "A" 249 | answer = "185.199.111.153" 250 | } 251 | 252 | # https://configgen.nycmesh.net to github pages 1/4 253 | resource "namedotcom_record" "record__configgen108" { 254 | domain_name = "nycmesh.net" 255 | host = "configgen" 
256 | record_type = "A" 257 | answer = "185.199.108.153" 258 | } 259 | 260 | # https://configgen.nycmesh.net to github pages 2/4 261 | resource "namedotcom_record" "record__configgen109" { 262 | domain_name = "nycmesh.net" 263 | host = "configgen" 264 | record_type = "A" 265 | answer = "185.199.109.153" 266 | } 267 | 268 | # https://configgen.nycmesh.net to github pages 3/4 269 | resource "namedotcom_record" "record__configgen110" { 270 | domain_name = "nycmesh.net" 271 | host = "configgen" 272 | record_type = "A" 273 | answer = "185.199.110.153" 274 | } 275 | 276 | # https://configgen.nycmesh.net to github pages 4/4 277 | resource "namedotcom_record" "record__configgen111" { 278 | domain_name = "nycmesh.net" 279 | host = "configgen" 280 | record_type = "A" 281 | answer = "185.199.111.153" 282 | } 283 | 284 | # Redirects to https://github.com/meshcenter/mesh-api 285 | resource "namedotcom_record" "record_api_7081451" { 286 | domain_name = "nycmesh.net" 287 | host = "api" 288 | record_type = "CNAME" 289 | answer = "nycmesh-api.netlify.com" 290 | } 291 | 292 | # Dashboard service (DigitalOcean) 293 | resource "namedotcom_record" "record_dashboard_7092840" { 294 | domain_name = "nycmesh.net" 295 | host = "dashboard" 296 | record_type = "CNAME" 297 | answer = "nycmesh-dashboard.netlify.com" 298 | } 299 | 300 | # Site verification for github. Only modify if you know what you're doing. 301 | resource "namedotcom_record" "record__github-challenge-nycmeshnet_194338752" { 302 | domain_name = "nycmesh.net" 303 | host = "_github-challenge-nycmeshnet" 304 | record_type = "TXT" 305 | answer = "91a37d19f2" 306 | } 307 | 308 | # Site verification for google. Only modify if you know what you're doing. 309 | resource "namedotcom_record" "record__2193735" { 310 | domain_name = "nycmesh.net" 311 | host = "" 312 | record_type = "TXT" 313 | answer = "google-site-verification=ZqOjueV-PhiukY-NDTf8CbGOPFwzGqeeIwmDQC-ZdRc" 314 | } 315 | 316 | # Site verification for google. 
Only modify if you know what you're doing. 317 | resource "namedotcom_record" "record__3686691" { 318 | domain_name = "nycmesh.net" 319 | host = "" 320 | record_type = "TXT" 321 | answer = "google-site-verification=bpTWn9VyMNrKSypwCvf-lWaiuO3IuTay6cqvKVud1po" 322 | } 323 | 324 | # Site verification for google. Only modify if you know what you're doing. 325 | resource "namedotcom_record" "record__206768814" { 326 | domain_name = "nycmesh.net" 327 | host = "" 328 | record_type = "TXT" 329 | answer = "google-site-verification=-6nHnrb5t1xNkD9zHiJm9hYTlAP7seIk-WLVaB1OveU" 330 | } 331 | 332 | # Mastodon 333 | resource "namedotcom_record" "record_mastodon_219371939" { 334 | domain_name = "nycmesh.net" 335 | host = "mastodon" 336 | record_type = "CNAME" 337 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 338 | } 339 | 340 | # Alternate domain for Mastodon 341 | resource "namedotcom_record" "record_social_219371944" { 342 | domain_name = "nycmesh.net" 343 | host = "social" 344 | record_type = "CNAME" 345 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 346 | } 347 | 348 | resource "namedotcom_record" "gsg_displays" { 349 | domain_name = "nycmesh.net" 350 | host = "gsg-displays" 351 | record_type = "CNAME" 352 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 353 | } 354 | 355 | # Typo helper for Mastodon 356 | resource "namedotcom_record" "record_mastadon_219988024" { 357 | domain_name = "nycmesh.net" 358 | host = "mastadon" 359 | record_type = "CNAME" 360 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 361 | } 362 | 363 | # Stripe redirect to github pages 1/4 364 | resource "namedotcom_record" "stripeportal_108" { 365 | domain_name = "nycmesh.net" 366 | host = "stripeportal" 367 | record_type = "A" 368 | answer = "185.199.108.153" 369 | } 370 | 371 | # Stripe redirect to github pages 2/4 372 | resource "namedotcom_record" "stripeportal_109" { 373 | domain_name = "nycmesh.net" 374 | host = "stripeportal" 375 | record_type = "A" 376 | answer = "185.199.109.153" 377 | } 378 
| 379 | # Stripe redirect to github pages 3/4 380 | resource "namedotcom_record" "stripeportal_110" { 381 | domain_name = "nycmesh.net" 382 | host = "stripeportal" 383 | record_type = "A" 384 | answer = "185.199.110.153" 385 | } 386 | 387 | # Stripe redirect to github pages 4/4 388 | resource "namedotcom_record" "stripeportal_111" { 389 | domain_name = "nycmesh.net" 390 | host = "stripeportal" 391 | record_type = "A" 392 | answer = "185.199.111.153" 393 | } 394 | 395 | # Invoice Ninja 396 | resource "namedotcom_record" "record_ninja_226273090" { 397 | domain_name = "nycmesh.net" 398 | host = "ninja" 399 | record_type = "A" 400 | answer = "165.227.70.230" 401 | } 402 | 403 | # Status page 404 | resource "namedotcom_record" "record_status_238885567" { 405 | domain_name = "nycmesh.net" 406 | host = "status" 407 | record_type = "A" 408 | answer = "164.92.117.225" 409 | } 410 | 411 | resource "namedotcom_record" "record_status_www" { 412 | domain_name = "nycmesh.net" 413 | host = "www.status" 414 | record_type = "CNAME" 415 | answer = "status.nycmesh.net" 416 | } 417 | 418 | # Test record, feel free to remove 419 | resource "namedotcom_record" "record__123" { 420 | answer = "127.0.0.1" 421 | domain_name = "nycmesh.net" 422 | host = "jamestest" 423 | record_type = "A" 424 | } 425 | 426 | ###### k8s stateless services ###### 427 | resource "namedotcom_record" "k8s_stateless_services_prod" { 428 | domain_name = "nycmesh.net" 429 | host = "k8s-stateless-prod" 430 | record_type = "CNAME" 431 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 432 | } 433 | 434 | resource "namedotcom_record" "k8s_stateless_services_dev" { 435 | domain_name = "nycmesh.net" 436 | host = "k8s-stateless-dev" 437 | record_type = "CNAME" 438 | answer = "kubernetes-lb-jon-sn3.nycmesh.net" 439 | } 440 | 441 | ###### Meshdb Prod ###### 442 | resource "namedotcom_record" "meshdb_prod_k8s_lb" { 443 | domain_name = "nycmesh.net" 444 | host = "kubernetes-lb-prod-sn3" 445 | record_type = "A" 446 | answer = 
"199.170.132.45" 447 | } 448 | 449 | resource "namedotcom_record" "meshdb_prod_sn10_k8s_lb" { 450 | domain_name = "nycmesh.net" 451 | host = "kubernetes-lb-prod-sn10" 452 | record_type = "A" 453 | answer = "23.158.16.22" 454 | } 455 | 456 | resource "namedotcom_record" "meshdb_prod_sn10_meshdb" { 457 | domain_name = "nycmesh.net" 458 | host = "sn10temp" 459 | record_type = "CNAME" 460 | answer = "kubernetes-lb-prod-sn10.nycmesh.net" 461 | } 462 | 463 | resource "namedotcom_record" "meshdb_prod_meshdb" { 464 | domain_name = "nycmesh.net" 465 | host = "db" 466 | record_type = "CNAME" 467 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 468 | } 469 | 470 | resource "namedotcom_record" "meshdb_prod_adminmap" { 471 | domain_name = "nycmesh.net" 472 | host = "adminmap.db" 473 | record_type = "CNAME" 474 | answer = "k8s-stateless-prod.nycmesh.net" 475 | } 476 | 477 | resource "namedotcom_record" "meshdb_prod_los-backend" { 478 | domain_name = "nycmesh.net" 479 | host = "los-backend.db" 480 | record_type = "CNAME" 481 | answer = "kubernetes-lb-prod-sn3.nycmesh.net" 482 | } 483 | 484 | resource "namedotcom_record" "record_los_6530453" { 485 | domain_name = "nycmesh.net" 486 | host = "los" 487 | record_type = "CNAME" 488 | answer = "k8s-stateless-prod.nycmesh.net" 489 | } 490 | 491 | resource "namedotcom_record" "meshdb_prod_forms" { 492 | domain_name = "nycmesh.net" 493 | host = "forms" 494 | record_type = "CNAME" 495 | answer = "k8s-stateless-prod.nycmesh.net" 496 | } 497 | 498 | # Public grafana 499 | resource "namedotcom_record" "record_stats_cname" { 500 | domain_name = "nycmesh.net" 501 | host = "stats" 502 | record_type = "CNAME" 503 | answer = "k8s-stateless-prod.nycmesh.net" 504 | } 505 | 506 | ###### Meshdb Dev ###### 507 | resource "namedotcom_record" "meshdb_dev_k8s_lb" { 508 | domain_name = "nycmesh.net" 509 | host = "kubernetes-lb-jon-sn3" 510 | record_type = "A" 511 | answer = "199.170.132.46" 512 | } 513 | 514 | resource "namedotcom_record" "devdb" { 515 | 
domain_name = "nycmesh.net" 516 | host = "devdb" 517 | record_type = "CNAME" 518 | answer = "kubernetes-lb-jon-sn3.nycmesh.net" 519 | } 520 | 521 | resource "namedotcom_record" "devpano" { 522 | domain_name = "nycmesh.net" 523 | host = "devpano" 524 | record_type = "CNAME" 525 | answer = "kubernetes-lb-jon-sn3.nycmesh.net" 526 | } 527 | 528 | resource "namedotcom_record" "devpano_api" { 529 | domain_name = "nycmesh.net" 530 | host = "api.devpano" 531 | record_type = "CNAME" 532 | answer = "kubernetes-lb-jon-sn3.nycmesh.net" 533 | } 534 | 535 | resource "namedotcom_record" "devimg" { 536 | domain_name = "nycmesh.net" 537 | host = "devimg" 538 | record_type = "CNAME" 539 | answer = "kubernetes-lb-jon-sn3.nycmesh.net" 540 | } 541 | 542 | resource "namedotcom_record" "devdb_all" { 543 | domain_name = "nycmesh.net" 544 | host = "*.devdb" 545 | record_type = "CNAME" 546 | answer = "kubernetes-lb-jon-sn3.nycmesh.net" 547 | } 548 | 549 | ###### Meshdb Gamma ###### 550 | resource "namedotcom_record" "meshdb_gamma_k8s_lb" { 551 | domain_name = "nycmesh.net" 552 | host = "kubernetes-lb-gamma-jon-sn3" 553 | record_type = "A" 554 | answer = "199.170.132.42" 555 | } 556 | 557 | resource "namedotcom_record" "gammadb" { 558 | domain_name = "nycmesh.net" 559 | host = "gammadb" 560 | record_type = "CNAME" 561 | answer = "kubernetes-lb-gamma-jon-sn3.nycmesh.net" 562 | } 563 | 564 | resource "namedotcom_record" "gammadb_all" { 565 | domain_name = "nycmesh.net" 566 | host = "*.gammadb" 567 | record_type = "CNAME" 568 | answer = "kubernetes-lb-gamma-jon-sn3.nycmesh.net" 569 | } 570 | 571 | ###### Website Map ###### 572 | resource "namedotcom_record" "website_map" { 573 | domain_name = "nycmesh.net" 574 | host = "map" 575 | record_type = "CNAME" 576 | answer = "k8s-stateless-prod.nycmesh.net" 577 | } 578 | 579 | resource "namedotcom_record" "website_map_dev" { 580 | domain_name = "nycmesh.net" 581 | host = "devmap" 582 | record_type = "CNAME" 583 | answer = 
"k8s-stateless-dev.nycmesh.net" 584 | } 585 | --------------------------------------------------------------------------------