├── .gitignore ├── README.md ├── files └── etc │ ├── default │ └── lxc-net │ ├── firehol │ ├── cloudflare.netset │ ├── firehol.conf │ └── fireqos.conf │ ├── locale.gen │ ├── lxc │ └── default.conf │ ├── nginx │ ├── cloudflare.conf │ ├── conf.d │ │ ├── netdata.conf │ │ └── status.conf │ └── snippets │ │ ├── ssl-params.conf │ │ └── ssl.conf │ ├── postfix │ ├── generic │ └── recipient_canonical.pcre │ ├── profile.d │ └── prompt.sh │ ├── rc.local │ ├── sudoers.d │ └── costa │ ├── sysctl.d │ ├── core.conf │ ├── entropy.conf │ ├── inotify.conf │ ├── net-buffers.conf │ ├── net-security.conf │ └── synproxy.conf │ └── systemd │ └── system │ ├── firehol.service │ └── fireqos.service ├── gvpe ├── README.md ├── compile-gvpe │ ├── build-gvpe-darwin.sh │ ├── build-gvpe-freebsd-static.sh │ └── build-gvpe-linux-static.sh ├── conf.d │ ├── gvpe.service │ ├── if-up │ ├── node-changed │ ├── node-down │ ├── node-up │ └── setup.sh ├── nodes.conf ├── provision-gvpe.sh ├── sbin.freebsd │ ├── gvpe │ └── gvpectrl ├── sbin.linux │ ├── gvpe │ └── gvpectrl └── sbin │ ├── gvpe-routing-order.sh │ ├── gvpe-status.sh │ └── gvpe-supervisor.sh ├── install-all-firehol.sh ├── install-required-packages.sh ├── install.sh ├── lxc ├── install.sh ├── update-netdata.sh └── update-system.sh └── newrelic.sh /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .idea/ 3 | CMakeLists.txt 4 | 5 | gvpe/conf.d/gvpe.conf 6 | gvpe/conf.d/hosts.real 7 | gvpe/conf.d/hosts.vpn 8 | gvpe/conf.d/pubkey 9 | gvpe/conf.d/status 10 | gvpe/keys/ 11 | gvpe/ids/ 12 | gvpe/systemd/ 13 | gvpe/*.tar.gz 14 | gvpe/*.log 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # netdata-demo-site 2 | Scripts to create a netdata-demo-site 3 | -------------------------------------------------------------------------------- /files/etc/default/lxc-net: 
-------------------------------------------------------------------------------- 1 | USE_LXC_BRIDGE="true" 2 | -------------------------------------------------------------------------------- /files/etc/firehol/cloudflare.netset: -------------------------------------------------------------------------------- 1 | 103.21.244.0/22 2 | 103.22.200.0/22 3 | 103.31.4.0/22 4 | 104.16.0.0/12 5 | 108.162.192.0/18 6 | 131.0.72.0/22 7 | 141.101.64.0/18 8 | 162.158.0.0/15 9 | 172.64.0.0/13 10 | 173.245.48.0/20 11 | 188.114.96.0/20 12 | 190.93.240.0/20 13 | 197.234.240.0/22 14 | 198.41.128.0/17 15 | 199.27.128.0/21 16 | -------------------------------------------------------------------------------- /files/etc/firehol/firehol.conf: -------------------------------------------------------------------------------- 1 | #!/sbin/firehol 2 | 3 | FIREHOL_ENABLE_SPINNER=1 4 | FIREHOL_LOG_MODE="NFLOG" 5 | 6 | FIREHOL_CONNTRACK_HASHSIZE=$[65536 * 2] 7 | FIREHOL_CONNTRACK_MAX=$[FIREHOL_CONNTRACK_HASHSIZE * 4] 8 | 9 | 10 | # ----------------------------------------------------------------------------- 11 | # services 12 | 13 | server_netdata_ports="tcp/19999" 14 | client_netdata_ports="any" 15 | 16 | server_gvpe_ports="tcp,udp/49999" 17 | client_gvpe_ports="any" 18 | 19 | 20 | # ----------------------------------------------------------------------------- 21 | # interfaces 22 | 23 | wan= 24 | vpns="vpn0" 25 | containers="docker0,lxcbr0" 26 | 27 | 28 | # ----------------------------------------------------------------------------- 29 | # find the WAN interface and IP 30 | 31 | # the internet network interface may not be: eth0 32 | # to find it, we use ip route get 8.8.8.8, which returns the device that will 33 | # be used to reach that IP. 
34 | 35 | # find the WAN interface name 36 | wan="$(ip -4 route get 8.8.8.8 | grep -oP "dev [^[:space:]]+ " | cut -d ' ' -f 2)" 37 | [ -z "${wan}" ] \ 38 | && wan="eth0" \ 39 | && echo >&2 "Assuming default gateway is via device: ${wan}" 40 | 41 | # find the IP of the WAN interface 42 | myip=( $(ip -4 address show ${wan} | grep 'inet' | sed 's/.*inet \([0-9\.]\+\).*/\1/') ) 43 | if [ ! -z "${myip[*]}" ] 44 | then 45 | # pass tcp/80,443 through synproxy 46 | synproxy4 in inface ${wan} dst "${myip[*]}" dport 80,443 accept 47 | 48 | # snat all private IPs (vpn, containers) attempting to reach the internet 49 | masquerade4 ${wan} src not "${myip[*]}" 50 | else 51 | echo >&2 "Cannot find interface ${wan} IP(s). Certain features are disabled." 52 | masquerade4 "${wan}" 53 | fi 54 | 55 | 56 | # ----------------------------------------------------------------------------- 57 | # fix containers dhcp checksum 58 | 59 | iptables -t mangle -A POSTROUTING -p udp --dport 68 -j CHECKSUM --checksum-fill 60 | 61 | 62 | # ----------------------------------------------------------------------------- 63 | # accept web traffic only from cloudflare 64 | 65 | ipset4 create cloudflare hash:net 66 | ipset4 addfile cloudflare cloudflare.netset 67 | 68 | # drop all other traffic to tcp/80,443 69 | ipv4 rule table filter chain INPUT \ 70 | inface ${wan} proto tcp dport 80,443 src not ipset:cloudflare \ 71 | state NEW action DROP loglimit "DROPPED NOT CLOUDFLARE" 72 | 73 | 74 | # ----------------------------------------------------------------------------- 75 | # create docker chains 76 | 77 | #iptables -t nat -N DOCKER 78 | #iptables -t filter -N DOCKER 79 | #iptables -t filter -N DOCKER-ISOLATION 80 | docker_bridge docker0 172.17.0.0/16 81 | 82 | 83 | # ----------------------------------------------------------------------------- 84 | # accept SSH on port 2222 85 | 86 | # redirect traffic towards tcp/2222 to tcp/22 87 | redirect to 22 proto tcp dport 2222 inface ${wan} 88 | 89 | # drop SSH 
on port 22 90 | redirect to 9 proto tcp dport 22 inface ${wan} 91 | 92 | 93 | # ----------------------------------------------------------------------------- 94 | # packet filtering, protecting this host 95 | 96 | # our administration VPN 97 | # DO NOT GIVE IPs, to allow DHCP 98 | interface "${vpns},${containers}" private 99 | policy accept 100 | 101 | 102 | # the internet interface 103 | interface any world 104 | policy drop 105 | 106 | server gvpe accept 107 | server ssh accept 108 | server http accept 109 | server https accept 110 | server icmp accept 111 | client all accept 112 | 113 | 114 | # ----------------------------------------------------------------------------- 115 | # packet filtering, protecting the ones we route traffic for 116 | 117 | # allow the VPNs and the containers to talk to each other 118 | router private2private \ 119 | inface "${vpns},${containers}" src "${PRIVATE_IPS}" \ 120 | outface "${vpns},${containers}" dst "${PRIVATE_IPS}" 121 | policy accept 122 | 123 | # allow the VPN and the containers to access (as clients) the internet 124 | router world2private \ 125 | inface not "${vpns},${containers}" src not "${PRIVATE_IPS}" \ 126 | outface "${vpns},${containers}" dst "${PRIVATE_IPS}" 127 | policy reject 128 | client all accept 129 | -------------------------------------------------------------------------------- /files/etc/firehol/fireqos.conf: -------------------------------------------------------------------------------- 1 | 2 | wan="$(ip -4 route get 8.8.8.8 | grep -oP "dev [^[:space:]]+ " | cut -d ' ' -f 2)" 3 | [ -z "${wan}" ] && wan="eth0" && echo >&2 "Assuming default gateway is via device: ${wan}" 4 | 5 | server_ssh_ports="tcp/22,2222" 6 | server_gvpe_ports="tcp,udp/49999" 7 | 8 | for xx in ${wan}/world 9 | do 10 | dev=${xx/\/*/} 11 | name=${xx/*\//} 12 | 13 | ip link show dev $dev >/dev/null 2>&1 14 | [ $? 
-ne 0 ] && continue 15 | 16 | interface $dev $name bidirectional ethernet balanced minrate 15kbit rate 100Mbit 17 | class arp 18 | match arp 19 | 20 | class icmp max 1Mbit 21 | match icmp 22 | 23 | class dns 24 | server dns 25 | client dns 26 | 27 | class ntp max 1Mbit 28 | server ntp 29 | client ntp 30 | 31 | class mail max 1Mbit 32 | server smtp 33 | client smtp 34 | 35 | class ssh 36 | server ssh 37 | client ssh 38 | 39 | class rsync 40 | server rsync 41 | client rsync 42 | 43 | class vpn 44 | server gvpe 45 | client gvpe 46 | match protocol AH 47 | 48 | class web_server input commit 10Mbit max 10Mbit output commit 50Mbit max 50Mbit 49 | server http 50 | server https 51 | 52 | class client max 20Mbit 53 | client surfing 54 | done 55 | -------------------------------------------------------------------------------- /files/etc/locale.gen: -------------------------------------------------------------------------------- 1 | el_GR.UTF-8 UTF-8 2 | en_US.UTF-8 UTF-8 3 | -------------------------------------------------------------------------------- /files/etc/lxc/default.conf: -------------------------------------------------------------------------------- 1 | lxc.network.type = veth 2 | lxc.network.link = lxcbr0 3 | lxc.network.flags = up 4 | lxc.network.name = eth0 5 | # lxc.network.veth.pair = container-eth0 6 | lxc.network.hwaddr = 00:16:3e:xx:xx:xx 7 | 8 | -------------------------------------------------------------------------------- /files/etc/nginx/cloudflare.conf: -------------------------------------------------------------------------------- 1 | set_real_ip_from 103.21.244.0/22; 2 | set_real_ip_from 103.22.200.0/22; 3 | set_real_ip_from 103.31.4.0/22; 4 | set_real_ip_from 104.16.0.0/12; 5 | set_real_ip_from 108.162.192.0/18; 6 | set_real_ip_from 131.0.72.0/22; 7 | set_real_ip_from 141.101.64.0/18; 8 | set_real_ip_from 162.158.0.0/15; 9 | set_real_ip_from 172.64.0.0/13; 10 | set_real_ip_from 173.245.48.0/20; 11 | set_real_ip_from 188.114.96.0/20; 12 | 
set_real_ip_from 190.93.240.0/20; 13 | set_real_ip_from 197.234.240.0/22; 14 | set_real_ip_from 198.41.128.0/17; 15 | set_real_ip_from 199.27.128.0/21; 16 | real_ip_header CF-Connecting-IP; 17 | -------------------------------------------------------------------------------- /files/etc/nginx/conf.d/netdata.conf: -------------------------------------------------------------------------------- 1 | upstream backend { 2 | server 127.0.0.1:19999; 3 | keepalive 1024; 4 | } 5 | 6 | server { 7 | listen *:80; 8 | listen *:443 ssl; 9 | server_name MY_REAL_IP_TO_BE_REPLACED_HERE 10 | netdata.firehol.org 11 | netdata1.firehol.org 12 | netdata2.firehol.org 13 | netdata3.firehol.org 14 | netdata4.firehol.org 15 | # 16 | my-netdata.io 17 | mynetdata.io 18 | netdata.online 19 | netdata.rocks 20 | # 21 | alpine.my-netdata.io 22 | # 23 | athens.my-netdata.io 24 | athens.mynetdata.io 25 | athens.netdata.online 26 | athens.netdata.rocks 27 | # 28 | atlanta.my-netdata.io 29 | atlanta.mynetdata.io 30 | atlanta.netdata.online 31 | atlanta.netdata.rocks 32 | # 33 | aws-fra.my-netdata.io 34 | # 35 | azure-west-eu.my-netdata.io 36 | # 37 | bangalore.my-netdata.io 38 | bangalore.mynetdata.io 39 | bangalore.netdata.online 40 | bangalore.netdata.rocks 41 | # 42 | cdn77.my-netdata.io 43 | # 44 | frankfurt.my-netdata.io 45 | frankfurt.mynetdata.io 46 | frankfurt.netdata.online 47 | frankfurt.netdata.rocks 48 | # 49 | london.my-netdata.io 50 | london.mynetdata.io 51 | london.netdata.online 52 | london.netdata.rocks 53 | # 54 | london2.my-netdata.io 55 | london2.mynetdata.io 56 | london2.netdata.online 57 | london2.netdata.rocks 58 | # 59 | london3.my-netdata.io 60 | london3.mynetdata.io 61 | london3.netdata.online 62 | london3.netdata.rocks 63 | # 64 | newyork.my-netdata.io 65 | newyork.mynetdata.io 66 | newyork.netdata.online 67 | newyork.netdata.rocks 68 | # 69 | octopuscs.my-netdata.io 70 | octopuscs.mynetdata.io 71 | octopuscs.netdata.online 72 | octopuscs.netdata.rocks 73 | # 74 | 
registry.my-netdata.io 75 | registry.mynetdata.io 76 | registry.netdata.online 77 | registry.netdata.rocks 78 | # 79 | sanfrancisco.my-netdata.io 80 | sanfrancisco.mynetdata.io 81 | sanfrancisco.netdata.online 82 | sanfrancisco.netdata.rocks 83 | # 84 | singapore.my-netdata.io 85 | singapore.mynetdata.io 86 | singapore.netdata.online 87 | singapore.netdata.rocks 88 | # 89 | stackscale.my-netdata.io 90 | stackscale.mynetdata.io 91 | stackscale.netdata.online 92 | stackscale.netdata.rocks 93 | # 94 | toronto.my-netdata.io 95 | toronto.mynetdata.io 96 | toronto.netdata.online 97 | toronto.netdata.rocks 98 | # 99 | ventureer.my-netdata.io 100 | ventureer.mynetdata.io 101 | ventureer.netdata.online 102 | ventureer.netdata.rocks 103 | ; 104 | 105 | location / { 106 | proxy_set_header X-Forwarded-Host $host; 107 | proxy_set_header X-Forwarded-Server $host; 108 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 109 | # proxy_pass http://127.0.0.1:19999; 110 | proxy_pass http://backend; 111 | proxy_http_version 1.1; 112 | proxy_pass_request_headers on; 113 | proxy_set_header Connection "keep-alive"; 114 | proxy_store off; 115 | #gzip on; 116 | #gzip_proxied any; 117 | #gzip_types *; 118 | } 119 | 120 | # lets-encrypt authentication support 121 | location /.well-known/acme-challenge/ { 122 | root /var/www/html; 123 | } 124 | 125 | if ($http_host = netdata.firehol.org) { 126 | rewrite ^/$ /demosites.html break; 127 | } 128 | if ($http_host = netdata.rocks) { 129 | rewrite ^/$ /demosites.html break; 130 | } 131 | if ($http_host = netdata.online) { 132 | rewrite ^/$ /demosites.html break; 133 | } 134 | if ($http_host = mynetdata.io) { 135 | rewrite ^/$ /demosites.html break; 136 | } 137 | if ($http_host = my-netdata.io) { 138 | rewrite ^/$ /demosites.html break; 139 | } 140 | rewrite ^/default\.html$ /index.html break; 141 | 142 | include cloudflare.conf; 143 | 144 | # WordPress Pingback Request Denial 145 | if ($http_user_agent ~* "WordPress") { 146 | return 403; 
147 | } 148 | 149 | if ($request_method !~ ^(GET|HEAD|OPTIONS)$ ) { 150 | return 403; 151 | } 152 | 153 | # include netdata-attacks.conf; 154 | # include firehol_webserver.conf; 155 | # include pushing_inertia_blocklist.conf; 156 | # include netdata-abusers.conf; 157 | } 158 | -------------------------------------------------------------------------------- /files/etc/nginx/conf.d/status.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 127.0.0.1:80; 3 | 4 | location /stub_status { 5 | stub_status on; 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /files/etc/nginx/snippets/ssl-params.conf: -------------------------------------------------------------------------------- 1 | # from https://cipherli.st/ 2 | # and https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html 3 | 4 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 5 | ssl_prefer_server_ciphers on; 6 | ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH"; 7 | ssl_ecdh_curve secp384r1; 8 | ssl_session_cache shared:SSL:10m; 9 | ssl_session_tickets off; 10 | ssl_stapling on; 11 | ssl_stapling_verify on; 12 | resolver 8.8.8.8 8.8.4.4 valid=300s; 13 | resolver_timeout 5s; 14 | # disable HSTS header for now 15 | #add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; 16 | add_header X-Frame-Options DENY; 17 | add_header X-Content-Type-Options nosniff; 18 | 19 | ssl_dhparam /etc/ssl/certs/dhparam.pem; 20 | 21 | 22 | -------------------------------------------------------------------------------- /files/etc/nginx/snippets/ssl.conf: -------------------------------------------------------------------------------- 1 | # enable SSL 2 | listen 443 ssl default_server; 3 | listen [::]:443 ssl default_server; 4 | 5 | # include the certificates 6 | include snippets/ssl-certs.conf; 7 | 8 | # strengthen SSL params 9 | include snippets/ssl-params.conf; 10 | 11 | # allow lets-encrypt to 
bypass netdata 12 | location ~ /.well-known { 13 | allow all; 14 | } 15 | -------------------------------------------------------------------------------- /files/etc/postfix/generic: -------------------------------------------------------------------------------- 1 | root@my-netdata.io costa@tsaousis.gr 2 | costa@my-netdata.io costa@tsaousis.gr 3 | @my-netdata.io costa@tsaousis.gr 4 | 5 | -------------------------------------------------------------------------------- /files/etc/postfix/recipient_canonical.pcre: -------------------------------------------------------------------------------- 1 | /^(.*@)my-netdata\.io$/ costa@tsaousis.gr 2 | -------------------------------------------------------------------------------- /files/etc/profile.d/prompt.sh: -------------------------------------------------------------------------------- 1 | declare -- PS1="\\[\\033]0;\\u@\\h:\\w\\007\\]\\[\\033[01;31m\\]\\h\\[\\033[01;34m\\] \\W \\\$\\[\\033[00m\\] " 2 | -------------------------------------------------------------------------------- /files/etc/rc.local: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # rc.local 4 | # 5 | # This script is executed at the end of each multiuser runlevel. 6 | # Make sure that the script will "exit 0" on success or any other 7 | # value on error. 8 | # 9 | # In order to enable or disable this script just change the execution 10 | # bits. 11 | # 12 | # By default this script does nothing. 
13 | 14 | 15 | # enable KSM 16 | echo 1 >/sys/kernel/mm/ksm/run 17 | echo 1000 >/sys/kernel/mm/ksm/sleep_millisecs 18 | 19 | sleep 60 20 | 21 | # wait for conntrack kernel module to be initialized 22 | ok=60 23 | while [ $ok -ne 0 ] 24 | do 25 | ok=$(( ok - 1 )) 26 | sysctl -w net/netfilter/nf_conntrack_tcp_loose=0 && ok=0 27 | [ $ok -ne 0 ] && sleep 1 28 | done 29 | sysctl -w net.netfilter.nf_conntrack_max=$[65536*4] 30 | echo 65536 > /sys/module/nf_conntrack/parameters/hashsize 31 | 32 | firehol start 33 | fireqos start 34 | 35 | 36 | # compiled named 37 | # /opt/named/sbin/named -u bind -4 38 | 39 | # restart netdata after named 40 | service netdata restart 41 | 42 | exit 0 43 | -------------------------------------------------------------------------------- /files/etc/sudoers.d/costa: -------------------------------------------------------------------------------- 1 | costa ALL=(ALL) NOPASSWD: ALL 2 | -------------------------------------------------------------------------------- /files/etc/sysctl.d/core.conf: -------------------------------------------------------------------------------- 1 | kernel.core_pattern=/tmp/core.%e.%p.%h.%t 2 | -------------------------------------------------------------------------------- /files/etc/sysctl.d/entropy.conf: -------------------------------------------------------------------------------- 1 | 2 | # https://redhatlinux.guru/index.php/2016/04/03/increase-system-entropy-on-rhel-centos-6-and-7/ 3 | kernel.random.read_wakeup_threshold = 2048 4 | kernel.random.write_wakeup_threshold = 3072 5 | -------------------------------------------------------------------------------- /files/etc/sysctl.d/inotify.conf: -------------------------------------------------------------------------------- 1 | # due to the number of containers run 2 | # we have to increase inotify thresholds 3 | 4 | fs.inotify.max_queued_events = 65536 5 | fs.inotify.max_user_instances = 1024 6 | fs.inotify.max_user_watches = 32768 7 | 8 | 
-------------------------------------------------------------------------------- /files/etc/sysctl.d/net-buffers.conf: -------------------------------------------------------------------------------- 1 | # http://www.nateware.com/linux-network-tuning-for-2013.html 2 | # Increase Linux autotuning TCP buffer limits 3 | # Set max to 16MB for 1GE and 32M (33554432) or 54M (56623104) for 10GE 4 | # Don't set tcp_mem itself! Let the kernel scale it based on RAM. 5 | net.core.rmem_max = 16777216 6 | net.core.wmem_max = 16777216 7 | net.core.rmem_default = 16777216 8 | net.core.wmem_default = 16777216 9 | net.core.optmem_max = 40960 10 | 11 | # cloudflare uses this for balancing latency and throughput 12 | # https://blog.cloudflare.com/the-story-of-one-latency-spike/ 13 | net.ipv4.tcp_rmem = 4096 1048576 2097152 14 | 15 | net.ipv4.tcp_wmem = 4096 65536 16777216 16 | 17 | # NICs RX dequeue options 18 | # budget should be less than backlog to avoid dropped packets 19 | net.core.netdev_max_backlog = 50000 20 | net.core.netdev_budget = 25000 21 | 22 | # Make room for more TIME_WAIT sockets due to more clients, 23 | # and allow them to be reused if we run out of sockets 24 | net.ipv4.tcp_max_syn_backlog = 30000 25 | net.ipv4.tcp_max_tw_buckets = 2000000 26 | net.ipv4.tcp_tw_reuse = 1 27 | net.ipv4.tcp_fin_timeout = 10 28 | 29 | # Disable TCP slow start on idle connections 30 | net.ipv4.tcp_slow_start_after_idle = 0 31 | 32 | # for UDP 33 | net.ipv4.udp_rmem_min = 8192 34 | net.ipv4.udp_wmem_min = 8192 35 | -------------------------------------------------------------------------------- /files/etc/sysctl.d/net-security.conf: -------------------------------------------------------------------------------- 1 | # https://gist.github.com/rschmitty/5875625 2 | 3 | # Protect ICMP attacks 4 | net.ipv4.icmp_echo_ignore_broadcasts = 1 5 | 6 | # Turn on protection for bad icmp error messages 7 | net.ipv4.icmp_ignore_bogus_error_responses = 1 8 | 9 | 
#https://www.ndchost.com/wiki/server-administration/hardening-tcpip-syn-flood 10 | #net.ipv4.tcp_syncookies = 1 11 | #net.ipv4.tcp_max_syn_backlog = 2048 12 | #net.ipv4.tcp_synack_retries = 2 13 | 14 | # Log suspicious packets, such as spoofed, source-routed, and redirect 15 | #net.ipv4.conf.all.log_martians = 1 16 | #net.ipv4.conf.default.log_martians = 1 17 | 18 | # Disables these ipv4 features, not very legitimate uses 19 | #net.ipv4.conf.all.accept_source_route = 0 20 | #net.ipv4.conf.default.accept_source_route = 0 21 | 22 | # timeout TCP sockets retries to about 1 - 1,5 min 23 | # the default is 15 (about 19 minutes) 24 | net.ipv4.tcp_retries2 = 7 25 | -------------------------------------------------------------------------------- /files/etc/sysctl.d/synproxy.conf: -------------------------------------------------------------------------------- 1 | net.ipv4.conf.lo.rp_filter = 0 2 | net.ipv4.conf.eth0.route_localnet = 1 3 | net.ipv4.tcp_syncookies = 1 4 | net.ipv4.tcp_timestamps = 1 5 | net.netfilter.nf_conntrack_tcp_loose = 0 6 | 7 | -------------------------------------------------------------------------------- /files/etc/systemd/system/firehol.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Firehol stateful packet filtering firewall for humans 3 | Documentation=man:firehol(1) man:firehol.conf(5) 4 | 5 | # we autodetect the ethernet IP, so we need the network 6 | #DefaultDependencies=no 7 | #Before=network-pre.target 8 | #Wants=network-pre.target 9 | 10 | Wants=systemd-modules-load.service local-fs.target 11 | After=systemd-modules-load.service local-fs.target 12 | 13 | Conflicts=shutdown.target 14 | Before=shutdown.target 15 | 16 | [Service] 17 | Type=oneshot 18 | RemainAfterExit=yes 19 | ExecStart=/usr/sbin/firehol start 20 | ExecStop=/usr/sbin/firehol stop 21 | 22 | [Install] 23 | WantedBy=multi-user.target 24 | 
-------------------------------------------------------------------------------- /files/etc/systemd/system/fireqos.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=FireQOS traffic shaping for humans 3 | Documentation=man:fireqos(1) 4 | 5 | [Service] 6 | Type=oneshot 7 | ExecStart=/usr/sbin/fireqos start 8 | ExecStop=/usr/sbin/fireqos stop 9 | RemainAfterExit=yes 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /gvpe/README.md: -------------------------------------------------------------------------------- 1 | # GVPE 2 | 3 | [GVPE](http://software.schmorp.de/pkg/gvpe.html) is a mesh VPN: a number of hosts running GVPE will get a virtual Ethernet interface (TAP) connecting them all together via encrypted communication. It is mesh, meaning that all hosts talk directly to each other, although routed communication is also supported. 4 | 5 | 6 | ## GVPE and TINC 7 | 8 | [GVPE](http://software.schmorp.de/pkg/gvpe.html) is very close to [TINC](https://www.tinc-vpn.org/), with the following differences (I found): 9 | 10 | 1. GVPE security is decided at compile-time, while TINC at configure-time. This repo includes statically linked GVPE binaries for Linux and FreeBSD, compiled with the strongest security settings GVPE supports. 11 | 12 | 2. All GVPE hosts need to be provisioned to all nodes of the network, while TINC allows new hosts to join at any time, without re-configuring the entire network. 13 | 14 | 3. GVPE and TINC support direct and routed communication (routed is when 2 hosts can only talk via another host). GVPE however allows statically configuring the order routers will be evaluated, for each node. 15 | 16 | 4. TINC has some means to distribute routing between all the nodes, so that any node can push new subnets in the routing tables of all nodes. GVPE does not have this functionality. 
You can hardcode it in the configuration though (so, it is static). 17 | 18 | 5. GVPE seems to support more [protocols](http://pod.tst.eu/http://cvs.schmorp.de/gvpe/doc/gvpe.protocol.7.pod) for communication between nodes: 19 | 20 | - `rawip`, that uses raw IP frames marked with any protocol number: GRE, IPSEC AH, etc. This is the best choice, due to its low overhead. 21 | - `icmp`, that uses any ICMP message. The second best choice in terms of overheads. It can also enable communication in certain cases where all other protocols fail. 22 | - `udp`, the most common alternative to `rawip`. 23 | - `tcp`, GVPE supports plain TCP but also tunneled through HTTPS proxies tcp connections. 24 | - `dns` (this is not compiled in the binary files in this repo). 25 | 26 | 6. GVPE communication between any 2 nodes cannot be sniffed or faked even by other nodes in the same VPN. I am not sure if this is also supported by TINC. 27 | 28 | 7. GVPE packages do not seem to be available in many operating systems, while TINC seems to be available everywhere. 29 | 30 | 31 | ## So, why GVPE? 32 | 33 | Yes, it seems that TINC is more capable than GVPE. So why GVPE? 34 | 35 | I decided to use GVPE for interconnecting netdata VMs, because GVPE seems a lot simpler and more straightforward. I liked the idea that all the nodes of the VPN will be statically configured and routing order is a configure-time decision. I also liked the broad range of transport protocols supported. 36 | 37 | The key limitations of GVPE in the netdata case are: 38 | 39 | 1. The lack of any automated mechanism for attempting multiple protocols between any 2 nodes. So, if for example `rawip` does not work for a node, manual re-configuration of the node is required to switch to another protocol. 40 | 41 | 2. The lack of any automated mechanism to fall back from direct to routed communications between any 2 nodes. 
So, if for example, due to temporary network issues a node cannot directly reach another node, gvpe will not attempt to re-route packets via another node that can connect to both. 42 | 43 | 44 | ## What are the GVPE files on this repo? 45 | 46 | The files in this directory attempt to easily provision changes to the entire VPN network: 47 | 48 | 1. Statically built gvpe binaries are provided for x64 Linux and FreeBSD. These binaries should be usable on any x64 Linux and FreeBSD. 49 | 2. A global configuration file ([nodes.conf](nodes.conf)) handles all the configuration for all nodes. 50 | 3. GVPE configuration `gvpe.conf` is common on all nodes. The script maintains the order of nodes (nodeid) across runs. 51 | 4. Custom gvpe configuration for nodes is maintained in `local.conf` and `routing.conf`. Both of these files are gvpe configurations and are not overwritten by updates. 52 | 5. A script ([provision-gvpe.sh](provision-gvpe.sh)) provisions everything (initial setup and updates) on all nodes (via SSH). 53 | 6. GVPE `if-up`, `node-up`, `node-down` and `node-changed` are provided. Each node may have its own extensions using `if-up.local`, `node-up.local`, `node-down.local` and `node-changed.local` (which are not overwritten by updates). 54 | 7. An enhanced status script [`gvpe-status.sh`](sbin/gvpe-status.sh) is provided, that shows current connection state for all nodes. 55 | 8. A simple script [`gvpe-routing-order.sh`](sbin/gvpe-routing-order.sh) is provided to ping all running nodes and based on their latency, decide the order they should be used as routers. 
56 | 57 | 58 | ## Links 59 | 60 | - [GVPE home page](http://software.schmorp.de/pkg/gvpe.html) 61 | - [GVPE configuration reference](http://pod.tst.eu/http://cvs.schmorp.de/gvpe/doc/gvpe.conf.5.pod) 62 | - [GVPE supported transport protocol](http://pod.tst.eu/http://cvs.schmorp.de/gvpe/doc/gvpe.protocol.7.pod) 63 | - [GVPE O/S support](http://pod.tst.eu/http://cvs.schmorp.de/gvpe/doc/gvpe.osdep.5.pod) 64 | 65 | 66 | ## How to use the scripts on this repo 67 | 68 | 1. Edit [nodes.conf](nodes.conf) and describe your nodes. You will need to describe the following: 69 | 70 | - a `name` for each node. 71 | - the `public IP` and `port` of each node. You can give the word `dynamic` as the IP, if it is not static, in which case the other nodes will not initiate connections towards this node. I use `dynamic` for my home, office and laptop. 72 | - the `virtual IP` of the node, i.e. the IP the node should get once connected to the VPN. 73 | - the `SSH IP` of the node, i.e. the IP the scripts will use for provisioning files and configuration to the node. You can use the keyword `vpn` to use the VPN IP (you can do this after the network has been setup once), or `localhost` to provision the files on the host running the scripts (I use this for my laptop), or `none` to disable provisioning for a node. 74 | - the operating system of the node. Currently `linux` and `freebsd` are supported. 
75 | 76 | This is mine: 77 | 78 | ```sh 79 | # ----------------------------------------------------------------------------- 80 | # configuration 81 | 82 | BASE_IP="172.16.254" 83 | 84 | # The CIDR of the entire VPN network 85 | VPN_NETWORK="${BASE_IP}.0/24" 86 | 87 | # The default port - each node may use a different 88 | PORT="49999" 89 | 90 | # HOSTNAME PUBLIC IP : PORT VIRTUAL IP O/S SSH IP 91 | node box dynamic:${PORT} ${BASE_IP}.1 linux 'vpn' 92 | node boxe dynamic:${PORT} ${BASE_IP}.2 linux 'vpn' 93 | node costa dynamic:$((PORT - 1)) ${BASE_IP}.3 linux 'localhost' 94 | node london 139.59.166.55:${PORT} ${BASE_IP}.10 linux '' 95 | node atlanta 185.93.0.89:${PORT} ${BASE_IP}.20 linux '' 96 | node west-europe 13.93.125.124:${PORT} ${BASE_IP}.30 linux '' 97 | node bangalore 139.59.0.212:${PORT} ${BASE_IP}.40 linux '' 98 | node frankfurt 46.101.193.115:${PORT} ${BASE_IP}.50 linux '' 99 | node sanfrancisco 104.236.149.236:${PORT} ${BASE_IP}.60 linux '' 100 | node toronto 159.203.30.96:${PORT} ${BASE_IP}.70 linux '' 101 | node singapore 128.199.80.131:${PORT} ${BASE_IP}.80 linux '' 102 | node newyork 162.243.236.205:${PORT} ${BASE_IP}.90 linux '' 103 | node aws-fra 35.156.164.190:${PORT} ${BASE_IP}.100 linux '' 104 | node netdata-build-server 40.68.190.151:${PORT} ${BASE_IP}.110 linux '' 105 | node freebsd 178.62.98.199:${PORT} ${BASE_IP}.120 freebsd '' 106 | 107 | # generate all configuration files locally 108 | configure 109 | 110 | # push all configuration files to all nodes 111 | provision 112 | 113 | # restart gvpe on all nodes 114 | activate 115 | ``` 116 | 117 | These are all the configuration you need to do. For most setups, the scripts will handle the rest. 118 | 119 | 120 | 2. Run [provision-gvpe.sh](provision-gvpe.sh) to generate the configuration, the public and private keys of the nodes and push everything to all nodes. The script uses SSH and RSYNC to update the nodes. If it fails to ssh to one of your servers it will stop - you have to fix it. 
I normally allow password-less ssh with my personal keys, so the script runs without any interaction. 121 | 122 | 3. When the script finishes successfully, all systems that are using `systemd` will be running `gvpe` (binaries and script will be saved at `/usr/local/sbin` and configuration at `/etc/gvpe`). For non-systemd systems you will have to ssh to the nodes manually and add `/usr/local/sbin/gvpe-supervisor.sh start` to your `/etc/rc.local` or `/etc/local.d`. Run it also by hand to start gvpe without rebooting. You will not need to do this again. Re-executing `provision-gvpe.sh` will restart `gvpe` even on these nodes. 123 | 124 | 4. For most systems, no firewall change should be needed. Yes, gvpe will get connected without any change to your firewall. The reason is that all nodes are attempting to connect to all other nodes. So firewalls will encounter both inbound and outbound communications, making them believe the connection was an outbound one that should be allowed. This allows connections to be established without altering the firewall, at least for UDP communications. Of course you will need to configure the firewall for all nodes if you use any `dynamic` nodes. 125 | 126 | 5. You can see the status of all nodes by running [`/usr/local/sbin/gvpe-status.sh`](sbin/gvpe-status.sh) on each node. 
You will get something like this: 127 | 128 | ```sh 129 | # /usr/local/sbin/gvpe-status.sh 130 | 131 | GVPE Status on boxe (Node No 2) 132 | 133 | Total Events: 259 134 | Last Event: 2017-04-17 01:55:24 135 | 136 | Up 15, Down 0, Total 15 nodes 137 | 138 | ID Name VPN IP REAL IP STATUS SINCE 139 | 1 box 172.16.254.1 udp/195.97.5.206:49999 up 2017-04-17 01:54:48 140 | 3 costa 172.16.254.3 udp/10.11.13.143:49998 up 2017-04-17 01:44:18 141 | 4 london 172.16.254.10 udp/139.59.166.55:49999 up 2017-04-17 01:54:44 142 | 5 atlanta 172.16.254.20 udp/185.93.0.89:49999 up 2017-04-17 01:54:46 143 | 6 west-europe 172.16.254.30 udp/13.93.125.124:49999 up 2017-04-17 01:54:56 144 | 7 bangalore 172.16.254.40 udp/139.59.0.212:49999 up 2017-04-17 01:54:51 145 | 8 frankfurt 172.16.254.50 udp/46.101.193.115:49999 up 2017-04-17 01:54:50 146 | 9 sanfrancisco 172.16.254.60 udp/104.236.149.236:49999 up 2017-04-17 01:54:59 147 | 10 toronto 172.16.254.70 udp/159.203.30.96:49999 up 2017-04-17 01:54:59 148 | 11 singapore 172.16.254.80 udp/128.199.80.131:49999 up 2017-04-17 01:55:09 149 | 12 newyork 172.16.254.90 udp/162.243.236.205:49999 up 2017-04-17 01:55:00 150 | 13 aws-fra 172.16.254.100 udp/35.156.164.190:49999 up 2017-04-17 01:55:12 151 | 14 netdata-build-server 172.16.254.110 udp/40.68.190.151:49999 up 2017-04-17 01:47:38 152 | 15 freebsd 172.16.254.120 udp/178.62.98.199:49999 up 2017-04-17 01:55:24 153 | ``` 154 | 155 | 6. You can set the order gvpe routers will be evaluated, by running [`/usr/local/sbin/gvpe-routing-order.sh`](sbin/gvpe-routing-order.sh) on each node. 156 | 157 | 7. If a node fails connect, you may need to disable a few protocols for it. On the failing node, edit `/etc/gvpe/local.conf` to override any of the default settings. Do not edit `/etc/gvpe/gvpe.conf`, as this will be overwritten when `provision-gvpe.sh` pushes new configuration. On amazon EC2 nodes, for example, I had to disable `rawip` and `icmp`. 158 | 159 | 8. 
If you need to add static routes to the routing tables of the nodes or take other actions when gvpe starts, nodes are connected, disconnected or updated, you will have to do it by hand, on each node, by editing all the `.local` files in `/etc/gvpe`. Keep in mind you can place any of these files in `conf.d` and `provision-gvpe.sh` will push it to all nodes (but note it will be executed on all nodes, without exception - normally static routing should be executed on all nodes, except one - the node that should route this traffic to its local network - you should handle this case by code in the script). 160 | 161 | 9. The scripts try to maintain persistent IDs for nodes. GVPE uses the order of the nodes in `gvpe.conf` to determine the ID of each node. The ID is used in the packets to identify the keys that should be used. If an update re-arranges the nodes, gvpe on all nodes will have to be restarted for the communication to be restored. So, the scripts try to maintain the same ID for each node, independently of the order the nodes appear in `nodes.conf`. If you need to remove a node though, I suggest keeping it with its `SSH IP` set to `none`. 
162 | -------------------------------------------------------------------------------- /gvpe/compile-gvpe/build-gvpe-darwin.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | brew install openssl || exit 1 4 | brew install cvs || exit 1 5 | 6 | [ -f gpve.old ] && rm -rf gvpe.old 7 | [ -f gpve ] && mv -f gvpe gvpe.old 8 | 9 | cvs -z3 -d :pserver:anonymous@cvs.schmorp.de/schmorpforge co gvpe || exit 1 10 | cd gvpe || exit 1 11 | cvs -z3 -d :pserver:anonymous@cvs.schmorp.de/schmorpforge co libev || exit 1 12 | 13 | export LDFLAGS="-L/usr/local/opt/openssl/lib" 14 | export CPPFLAGS="-I/usr/local/opt/openssl/include" 15 | export PKG_CONFIG_PATH="/usr/local/opt/openssl/lib/pkgconfig" 16 | 17 | echo > doc/Makefile.am 18 | 19 | export AUTOMAKE="automake" 20 | export ACLOCAL="aclocal" 21 | #export LDFLAGS="-static" 22 | 23 | ./autogen.sh \ 24 | --prefix=/ \ 25 | --enable-iftype=native/darwin \ 26 | --enable-threads \ 27 | --enable-bridging \ 28 | --enable-rsa-length=3072 \ 29 | --enable-hmac-length=12 \ 30 | --enable-max-mtu=9000 \ 31 | --enable-cipher=aes-256 \ 32 | --enable-hmac-digest=ripemd160 \ 33 | --enable-auth-digest=sha512 \ 34 | --enable-rand-length=12 \ 35 | ${NULL} || exit 1 36 | 37 | make clean 38 | make -j2 || exit 1 39 | 40 | echo "ALL DONE" 41 | echo "You need these 2 files - openssl needs to be installed to run them" 42 | ls -l $(pwd)/src/gvpe $(pwd)/src/gvpectrl 43 | -------------------------------------------------------------------------------- /gvpe/compile-gvpe/build-gvpe-freebsd-static.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | sudo pkg install cvs || exit 1 4 | 5 | [ -f gpve.old ] && rm -rf gvpe.old 6 | [ -f gpve ] && mv -f gvpe gvpe.old 7 | 8 | cvs -z3 -d :pserver:anonymous@cvs.schmorp.de/schmorpforge co gvpe || exit 1 9 | cd gvpe || exit 1 10 | cvs -z3 -d :pserver:anonymous@cvs.schmorp.de/schmorpforge co libev 
|| exit 1 11 | 12 | echo > doc/Makefile.am 13 | 14 | cat >m4/Makefile.am.in <<"EOF" 15 | ## Process this file with automake to produce Makefile.in -*-Makefile-*- 16 | 17 | ##m4-files-begin 18 | ##m4-files-end 19 | 20 | Makefile.am: Makefile.am.in 21 | rm -f $@ $@t 22 | #sed -n '1,/^##m4-files-begin/p' $< > $@t 23 | ( echo EXTRA_DIST = README Makefile.am.in; \ 24 | find . -type f -name '*.m4' -print |sed 's,^\./,,' |sort ) \ 25 | |fmt | (tr '\012' @; echo) \ 26 | |sed 's/@$$/%/;s/@/ \\@/g' |tr @% '\012\012' \ 27 | >> $@t 28 | #sed -n '/^##m4-files-end/,$$p' $< >> $@t 29 | chmod a-w $@t 30 | mv $@t $@ 31 | EOF 32 | 33 | export AUTOMAKE="automake" 34 | export ACLOCAL="aclocal" 35 | export LDFLAGS="-static" 36 | 37 | ./autogen.sh \ 38 | --prefix=/ \ 39 | --enable-threads \ 40 | --enable-rsa-length=3072 \ 41 | --enable-hmac-length=12 \ 42 | --enable-max-mtu=9000 \ 43 | --enable-cipher=aes-256 \ 44 | --enable-hmac-digest=ripemd160 \ 45 | --enable-auth-digest=sha512 \ 46 | --enable-rand-length=12 \ 47 | ${NULL} || exit 1 48 | 49 | make clean 50 | make -j2 || exit 1 51 | 52 | echo "ALL DONE" 53 | echo "You need these 2 files - they are statically linked" 54 | ls -l $(pwd)/src/gvpe $(pwd)/src/gvpectrl 55 | -------------------------------------------------------------------------------- /gvpe/compile-gvpe/build-gvpe-linux-static.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | run() { 4 | echo >&2 " " 5 | echo >&2 "${PWD} > ${@}" 6 | "${@}" 7 | ret=$? 
8 | 9 | if [ "${ret}" = "0" ] 10 | then 11 | echo >&2 " - OK - " 12 | else 13 | echo >&2 " - FAILED - ${ret} " 14 | fi 15 | 16 | return ${ret} 17 | } 18 | 19 | if [ "${1}" != "inside-container" ] 20 | then 21 | ME="$(basename "${0}")" 22 | DIR="$(dirname "${0}")" 23 | 24 | cd "${DIR}" || exit 1 25 | DIR="$(pwd)" 26 | 27 | echo "ME : ${ME}" 28 | echo "DIR : ${DIR}" 29 | 30 | ret=0 31 | 32 | run sudo docker run -a stdin -a stdout -a stderr -i -t \ 33 | -v "${DIR}:/tmp/mapped:rw" alpine:edge \ 34 | /bin/sh "/tmp/mapped/${ME}" inside-container 35 | ret=$? 36 | 37 | if [ ${ret} -eq 0 ] 38 | then 39 | echo "Copying generated binaries to ${DIR}/../sbin.linux/" 40 | run mv ${DIR}/gvpe ${DIR}/../sbin.linux/ 41 | run mv ${DIR}/gvpectrl ${DIR}/../sbin.linux/ 42 | fi 43 | 44 | exit ${ret} 45 | fi 46 | 47 | run apk update || exit 1 48 | run apk add --no-cache \ 49 | bash \ 50 | wget \ 51 | curl \ 52 | ncurses \ 53 | git \ 54 | netcat-openbsd \ 55 | alpine-sdk \ 56 | autoconf \ 57 | automake \ 58 | gcc \ 59 | make \ 60 | libtool \ 61 | pkgconfig \ 62 | util-linux-dev \ 63 | libressl-dev \ 64 | gnutls-dev \ 65 | zlib-dev \ 66 | libmnl-dev \ 67 | libnetfilter_acct-dev \ 68 | cvs \ 69 | ${NULL} || exit 1 70 | 71 | if [ ! -d /usr/src ] 72 | then 73 | run mkdir -p /usr/src || exit 1 74 | fi 75 | cd /usr/src || exit 1 76 | 77 | if [ ! -d gvpe ] 78 | then 79 | run cvs -z3 -d :pserver:anonymous@cvs.schmorp.de/schmorpforge co gvpe || exit 1 80 | fi 81 | 82 | run cd gvpe 83 | 84 | if [ ! 
-d libev ] 85 | then 86 | run cvs -z3 -d :pserver:anonymous@cvs.schmorp.de/schmorpforge co libev || exit 1 87 | fi 88 | 89 | echo > doc/Makefile.am 90 | echo > lib/getopt1.c 91 | 92 | export AUTOMAKE="automake" 93 | export ACLOCAL="aclocal" 94 | export LDFLAGS="-static" 95 | 96 | # lower 15 seconds to 10 seconds for re-connecting 97 | #sed -i "s|^ else if (when < -15)$| else if (when < -10)|g" src/connection.C || echo >& " --- FAILED TO PATCH CONNECTION.C --- " 98 | 99 | run ./autogen.sh \ 100 | --prefix=/ \ 101 | --enable-iftype=native/linux \ 102 | --enable-threads \ 103 | --enable-rsa-length=3072 \ 104 | --enable-hmac-length=12 \ 105 | --enable-max-mtu=9000 \ 106 | --enable-cipher=aes-256 \ 107 | --enable-hmac-digest=ripemd160 \ 108 | --enable-auth-digest=sha512 \ 109 | --enable-static-daemon \ 110 | ${NULL} 111 | 112 | # --enable-bridging \ 113 | # --enable-rand-length=12 \ 114 | 115 | run make clean 116 | run make -j8 || exit 1 117 | 118 | echo "gvpe linking:" 119 | run ldd src/gvpe 120 | run cp src/gvpe /tmp/mapped/ 121 | 122 | echo 123 | echo "gvpectrl linking:" 124 | run ldd src/gvpectrl 125 | run cp src/gvpectrl /tmp/mapped/ 126 | 127 | -------------------------------------------------------------------------------- /gvpe/conf.d/gvpe.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=gvpe 3 | After=network.target 4 | Before=remote-fs.target 5 | 6 | [Service] 7 | ExecStart=/usr/local/sbin/gvpe-supervisor.sh systemd-start 8 | KillMode=process 9 | Restart=always 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | 14 | -------------------------------------------------------------------------------- /gvpe/conf.d/if-up: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export PATH="${PATH}:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" 4 | 5 | set -e 6 | 7 | ME="$(basename ${0})" 8 | 9 | 10 | # 
----------------------------------------------------------------------------- 11 | # show what is happening 12 | 13 | echo >&2 "${ME} on '${NODENAME}' (${NODEID}), interface '${IFNAME}' (type '${IFTYPE}', subtype '${IFSUBTYPE}', mac '${MAC}', mtu '${MTU}', ifupdata '${IFUPDATA}'), config '${CONFBASE}', nodes ${NODES}" 14 | 15 | 16 | # ----------------------------------------------------------------------------- 17 | # functions 18 | 19 | run() { 20 | printf >&2 " > " 21 | printf >&2 "%q " "${@}" 22 | printf >&2 "\n" 23 | "${@}" 24 | return $? 25 | } 26 | 27 | # ----------------------------------------------------------------------------- 28 | # parse the IP and NET from ${IFUPDATA} 29 | 30 | NET="$(echo "${IFUPDATA}" | cut -d '|' -f 1)" 31 | IP="$(echo "${IFUPDATA}" | cut -d '|' -f 2)" 32 | echo >&2 "IP : ${IP}" 33 | echo >&2 "NET : ${NET}" 34 | 35 | # ----------------------------------------------------------------------------- 36 | # bring the interface up 37 | 38 | os="$(uname -s)" 39 | case "${os}" in 40 | Linux) 41 | run ip link set ${IFNAME} address ${MAC} mtu ${MTU} up 42 | run ip addr add ${IP} dev ${IFNAME} 43 | run ip route replace ${NET} dev ${IFNAME} || \ 44 | run ip route add ${NET} dev ${IFNAME} 45 | ;; 46 | 47 | FreeBSD) 48 | run ifconfig ${IFNAME} inet ${IP}/${NET/*\/} up description "GVPE tunnel" 49 | ;; 50 | 51 | *) 52 | echo >&2 "Unknown operating system '${os}'. Configuration may be incomplete." 53 | ;; 54 | esac 55 | 56 | # ----------------------------------------------------------------------------- 57 | # we just connected, mark all nodes down 58 | 59 | cd ${CONFBASE}/status 60 | active_nodes=0 61 | while read x 62 | do 63 | active_nodes=$((active_nodes + 1)) 64 | 65 | [ "${x}" = "${MYNODENAME}" ] && continue 66 | [ ! -f "${x}" ] && echo >&2 "File '${x}' missing!" 
&& continue 67 | 68 | source "${x}.reset" 69 | #eval "mac=\${MAC_${nodeid}-${mac}}; ifupdata=\${IFUPDATA_${nodeid}-${ifupdata}}" 70 | eval "mac=\${MAC_${nodeid}-${mac}}" 71 | cat >${x} <${CONFBASE}/status/${NODENAME} <${CONFBASE}/status/status <&2 "${ME}: '${STATE}' event for '${DESTNODE}' (${DESTID}) at '${DESTSI}' (ip '${DESTIP}', port '${DESTPORT}'), running on '${NODENAME}' (${NODEID}), interface '${IFNAME}' (type '${IFTYPE}', subtype '${IFSUBTYPE}', mac '${MAC}', mtu '${MTU}', ifupdata '${IFUPDATA}'), config '${CONFBASE}', nodes ${NODES}" 11 | 12 | 13 | [ -f ${CONFBASE}/status/${DESTNODE} ] && \ 14 | . ${CONFBASE}/status/${DESTNODE} 15 | 16 | cat >${CONFBASE}/status/${DESTNODE} <${CONFBASE}/status/status <&2 " > " 11 | printf >&2 "%q " "${@}" 12 | printf >&2 "\n" 13 | "${@}" 14 | return $? 15 | } 16 | 17 | CONFBASE=${1-/etc/gvpe} 18 | 19 | 20 | # ----------------------------------------------------------------------------- 21 | # generate the local GVPE configuration overrides file 22 | 23 | if [ ! 
-f ${CONFBASE}/local.conf ] 24 | then 25 | cat >${CONFBASE}/local.conf <${CONFBASE}/routing.conf <${CONFBASE}/if-up.local <${CONFBASE}/node-up.local <${CONFBASE}/node-changed.local <${CONFBASE}/node-down.local <&2 "FAILED TO RESTART gvpe" 162 | exit 1 163 | fi 164 | 165 | exit 0 166 | -------------------------------------------------------------------------------- /gvpe/nodes.conf: -------------------------------------------------------------------------------- 1 | # ----------------------------------------------------------------------------- 2 | # configuration 3 | 4 | BASE_IP="172.16.254" 5 | 6 | # The CIDR of the entire VPN network 7 | VPN_NETWORK="${BASE_IP}.0/24" 8 | 9 | # The default port - each node may use a different 10 | PORT="49999" 11 | 12 | # when you want to create random keys for the disabled 13 | # hosts, set this to 1 14 | RANDOMIZE_KEYS_OF_DISABLED_HOSTS=0 15 | 16 | # set this to 1, to delete all private and public keys 17 | # and regenerate them 18 | # 19 | # DANGEROUS: 20 | # if gvpe on any host fails to restart during activation 21 | # the host will not re-connect to the VPN 22 | REGENARATE_ALL_KEYS=0 23 | 24 | 25 | # PUBLIC IP can be: 26 | # this is this IP all the other nodes should use 27 | # dynamic prevent other nodes from initiating connections to this node. 
28 | # also prevents this node from being used are a router 29 | 30 | # SSH IP can be: 31 | # use this IP for SSHing to the node 32 | # use the PUBLIC IP for SSHing to the node 33 | # vpn use the VIRTUAL IP (the VPN) for SSHing to the node 34 | # localhost this node is the localhost 35 | # none do not provision this node 36 | 37 | # HOSTNAME PUBLIC IP : PORT VIRTUAL IP O/S SSH IP PROTOCOLS 38 | node box 94.71.88.100:${PORT} ${BASE_IP}.1 linux '' any 39 | node boxe 94.64.79.64:${PORT} ${BASE_IP}.2 linux '' any 40 | node costa dynamic:$((PORT - 1)) ${BASE_IP}.3 linux 'localhost' any 41 | node london 139.59.166.55:${PORT} ${BASE_IP}.10 linux '' any 42 | node atlanta none none none 'none' any 43 | node west-europe 13.93.125.124:${PORT} ${BASE_IP}.30 linux '' any 44 | node bangalore 139.59.0.212:${PORT} ${BASE_IP}.40 linux '' any 45 | node frankfurt 46.101.193.115:${PORT} ${BASE_IP}.50 linux '' any 46 | node sanfrancisco 104.236.149.236:${PORT} ${BASE_IP}.60 linux '' any 47 | node toronto 159.203.30.96:${PORT} ${BASE_IP}.70 linux '' any 48 | node singapore 128.199.80.131:${PORT} ${BASE_IP}.80 linux '' any 49 | node newyork 162.243.236.205:${PORT} ${BASE_IP}.90 linux '' any 50 | node aws-fra 35.156.164.190:${PORT} ${BASE_IP}.100 linux '' any 51 | node build none none none 'none' any 52 | node freebsd none none none 'none' any 53 | node ventureer none none none 'none' any 54 | node stackscale none none none 'none' any 55 | node cdn77 185.152.66.101:${PORT} ${BASE_IP}.150 linux '' any 56 | node octopuscs none none none 'none' any 57 | node london3 138.68.182.52:${PORT} ${BASE_IP}.11 linux '' any 58 | 59 | # generate all configuration files locally 60 | configure 61 | 62 | # push all configuration files to all nodes 63 | provision 64 | #node_provision_files costa 65 | 66 | # restart gvpe on all nodes 67 | activate 68 | #node_setup costa 69 | 70 | # calculate the routing order on all nodes 71 | #sleep 10 72 | #save_routing_order 73 | 
-------------------------------------------------------------------------------- /gvpe/provision-gvpe.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | RANDOMIZE_KEYS_OF_DISABLED_HOSTS=0 6 | REGENARATE_ALL_KEYS=0 7 | 8 | STATIC_NODE_ROUTER_PRIORITY="1" 9 | DYNAMIC_NODE_ROUTER_PRIORITY="0" 10 | 11 | STATIC_NODE_CONNECT="always" 12 | DYNAMIC_NODE_CONNECT="ondemand" 13 | DISABLED_NODE_CONNECT="disabled" 14 | 15 | ME="$(realpath ${0})" 16 | NULL= 17 | 18 | [ ! -d ids ] && mkdir -p ids 19 | [ ! -d keys ] && mkdir -p keys 20 | [ ! -d conf.d ] && mkdir -p conf.d 21 | [ ! -d conf.d/pubkey ] && mkdir -p conf.d/pubkey 22 | [ ! -d conf.d/status ] && mkdir -p conf.d/status 23 | 24 | # clean up 25 | rm conf.d/status/* 2>/dev/null || echo >/dev/null 26 | rm conf.d/pubkey/* 2>/dev/null || echo >/dev/null 27 | 28 | run() { 29 | printf >&2 " > " 30 | printf >&2 "%q " "${@}" 31 | printf >&2 "\n" 32 | 33 | "${@}" 34 | return $? 
35 | } 36 | 37 | prepare_configuration() { 38 | cat >conf.d/gvpe.conf <conf.d/gvpe.conf.end <conf.d/hosts.real <conf.d/hosts.vpn <&2 "Unknown O/S '${os}'" 184 | exit 1 185 | ;; 186 | esac 187 | 188 | if [ -z "${p}" -o "${p}" = "none" -o -z "${vip}" -o "${vip}" = "none" -o "${proto}" = "none" ] 189 | then 190 | p="none" 191 | pip="none" 192 | vip="none" 193 | sip="none" 194 | proto="none" 195 | port="0" 196 | ifname="none" 197 | fi 198 | 199 | ifupdata="${VPN_NETWORK}|${vip}" 200 | 201 | case "${pip}" in 202 | none) 203 | connect="${DISABLED_NODE_CONNECT}" 204 | router_priority="${DYNAMIC_NODE_ROUTER_PRIORITY}" 205 | ;; 206 | 207 | dynamic) 208 | [ -z "${sip}" ] && sip="${vip}" 209 | connect="${DYNAMIC_NODE_CONNECT}" 210 | router_priority="${DYNAMIC_NODE_ROUTER_PRIORITY}" 211 | ;; 212 | 213 | *) 214 | connect="${STATIC_NODE_CONNECT}" 215 | router_priority="${STATIC_NODE_ROUTER_PRIORITY}" 216 | ;; 217 | esac 218 | 219 | [ "${sip}" = "vpn" ] && sip="${vip}" 220 | [ -z "${sip}" ] && sip="${pip}" 221 | 222 | if [ ! -z "${unique_names[${name}]}" ] 223 | then 224 | echo >&2 "Name '${name}' for IP ${pip} already exists with IP ${unique_names[${name}]}." 225 | exit 1 226 | fi 227 | 228 | if [ "${pip}" != "none" -a "${pip}" != "dynamic" -a ! -z "${unique_pips[${pip}]}" ] 229 | then 230 | echo >&2 "Public IP '${pip}' for ${name} already exists for ${unique_pips[${pip}]}." 231 | exit 1 232 | fi 233 | 234 | if [ "${vip}" != "none" -a ! -z "${unique_vips[${vip}]}" ] 235 | then 236 | echo >&2 "VPN IP '${vip}' for ${name} already exists for ${unique_vips[${vip}]}." 237 | exit 1 238 | fi 239 | 240 | unique_names[${name}]="${pip}" 241 | unique_pips[${pip}]="${name}" 242 | unique_vips[${vip}]="${name}" 243 | 244 | if [ -f ids/${name} ] 245 | then 246 | gvpe_id[${name}]=$(ids/.max 250 | gvpe_id[${name}]=${max_id} 251 | fi 252 | echo "${gvpe_id[${name}]}" >ids/${name} 253 | 254 | if [ ! 
-z "${gvpe_name_by_id[${gvpe_id[${name}]}]}" ] 255 | then 256 | echo >&2 "Node '${name}' gets ID ${gvpe_id[${name}]} that points to node '${gvpe_name_by_id[${gvpe_id[${name}]}]}'" 257 | exit 1 258 | fi 259 | gvpe_name_by_id[${gvpe_id[${name}]}]=${name} 260 | 261 | gvpe_name[${name}]="${name}" 262 | gvpe_os[${name}]="${os}" 263 | gvpe_vip[${name}]="${vip}" 264 | gvpe_sip[${name}]="${sip}" 265 | gvpe_pip[${name}]="${pip}" 266 | gvpe_port[${name}]="${port}" 267 | gvpe_ifname[${name}]="${ifname}" 268 | gvpe_ifupdata[${name}]="${ifupdata}" 269 | gvpe_connect[${name}]="${connect}" 270 | gvpe_router_priority[${name}]="${router_priority}" 271 | 272 | local x fproto= 273 | for x in ${proto//,/ } 274 | do 275 | case "${x}" in 276 | any|all) 277 | ;; 278 | 279 | tcp|udp|rawip|icmp|none) 280 | fproto="${fproto} ${x}" 281 | ;; 282 | 283 | *) 284 | echo >&2 "Ignoring unknown protocol: ${x}" 285 | ;; 286 | esac 287 | done 288 | gvpe_proto[${name}]="${fproto}" 289 | } 290 | 291 | foreach_node() { 292 | local callback="${1}" name 293 | 294 | for name in "${gvpe_name_by_id[@]}" 295 | do 296 | # echo >&2 "Calling ${callback} for ${name} (${gvpe_id[${name}]})" 297 | ${callback} "${name}" 298 | done 299 | } 300 | 301 | parallel_callback() { 302 | local lock="${1}" parallel_failed=0 303 | shift 1 304 | 305 | ( 306 | flock -n 99 || exit 1 307 | # ... commands executed under lock ... 
308 | 309 | "${@}" >"${lock}.out" 2>&1 || parallel_failed=1 310 | 311 | echo >&99 "${parallel_failed}" 312 | ) 99>"${lock}" 313 | } 314 | 315 | foreach_node_parallel() { 316 | local callback="${1}" name 317 | local locked=0 failed l hosts= failed_hosts= 318 | 319 | local confd="$(run mktemp -d /tmp/gvpe-parallel-runner-XXXXXXXXXX)" 320 | 321 | for name in "${gvpe_name_by_id[@]}" 322 | do 323 | locked=$((locked + 1)) 324 | hosts="${hosts} ${name}" 325 | # echo >&2 "Calling ${callback} for ${name} (${gvpe_id[${name}]})" 326 | parallel_callback "${confd}/${name}" ${callback} "${name}" & 327 | done 328 | 329 | while [ $locked -gt 0 ] 330 | do 331 | echo >&2 "waiting for ${locked} hosts to finish (${hosts} )..." 332 | sleep 2 333 | 334 | locked=0 335 | hosts= 336 | for name in "${gvpe_name_by_id[@]}" 337 | do 338 | l=0 339 | flock --nonblock "${confd}/${name}" -c "echo >/dev/null" || l=1 340 | [ ${l} -eq 1 ] && hosts="${hosts} ${name}" && locked=$((locked + 1)) 341 | done 342 | done 343 | 344 | failed=0 345 | hosts= 346 | for name in "${gvpe_name_by_id[@]}" 347 | do 348 | { 349 | echo 350 | echo 351 | echo " --- BEGIN OUTPUT of ${callback} ${name} on $(date) --- " 352 | cat "${confd}/${name}.out" 353 | echo " --- END OUTPUT of ${callback} ${name} --- " 354 | echo 355 | echo 356 | } >>gvpe.log 357 | 358 | 359 | l="$(cat "${confd}/${name}")" 360 | if [ "${l}" != "0" ] 361 | then 362 | failed_hosts="${failed_hosts} ${name}" 363 | echo >&2 364 | echo >&2 365 | echo >&2 366 | echo >&2 "HOST ${name} FAILED !!!" 
367 | echo >&2 368 | echo >&2 " --- BEGIN OUTPUT of ${callback} ${name} --- " 369 | cat "${confd}/${name}.out" 370 | echo >&2 " --- END OUTPUT of ${callback} ${name} --- " 371 | failed=$(( failed + 1 )) 372 | fi 373 | done 374 | 375 | run rm -rf "${confd}" 376 | 377 | if [ ${failed} -gt 0 ] 378 | then 379 | echo >&2 380 | echo >&2 "${failed} jobs failed ( ${failed_hosts} )" 381 | return 1 382 | else 383 | echo >&2 "all jobs report success" 384 | return 0 385 | fi 386 | } 387 | 388 | node_status_file() { 389 | local name="${1}" status="disabled" 390 | 391 | if [ "${gvpe_os[${name}]}" != "none" ] 392 | then 393 | echo "${name}" >>conf.d/status/nodes 394 | status="down" 395 | fi 396 | 397 | cat >conf.d/status/${name}.reset <&2 "Invalid protocol: ${x}";; 449 | esac 450 | done 451 | fi 452 | 453 | cat >>conf.d/gvpe.conf <>conf.d/gvpe.conf.end <&2 "generating keys for: ${name}" 491 | cd keys 492 | run ../sbin.linux/gvpectrl -c ../conf.d -g ${name} 493 | cd .. 494 | fi 495 | 496 | run cp -p keys/${name} conf.d/pubkey/${name} 497 | } 498 | 499 | node_hosts() { 500 | local name="${1}" 501 | 502 | if [ "${gvpe_pip[${name}]}" != "none" -a "${gvpe_pip[${name}]}" != "dynamic" ] 503 | then 504 | printf "%-15s %s\n" "${gvpe_pip[${name}]}" "${name}" >>conf.d/hosts.real 505 | fi 506 | if [ "${gvpe_vip[${name}]}" != "none" ] 507 | then 508 | printf "%-15s %s\n" "${gvpe_vip[${name}]}" "${name}" >>conf.d/hosts.vpn 509 | fi 510 | } 511 | 512 | node_provision_files() { 513 | local name="${1}" 514 | 515 | local confd="$(run mktemp -d /tmp/gvpe-${name}-XXXXXXXXXX)" 516 | [ -z "${confd}" ] && echo >&2 "Cannot create temporary directory" && return 1 517 | 518 | rsync -HaSPv conf.d/ "${confd}/" 519 | 520 | echo "${name}" >${confd}/hostname 521 | run cp keys/${name}.privkey ${confd}/hostkey 522 | [ -f "gvpe-conf-d-on-${name}.tar.gz" ] && run rm "gvpe-conf-d-on-${name}.tar.gz" 523 | run tar -zcpf "gvpe-conf-d-on-${name}.tar.gz" ${confd}/ 524 | 525 | # do not provision hosts with O/S set to 
'none' 526 | if [ "${gvpe_os[${name}]}" != "none" -a "${gvpe_sip[${name}]}" != "none" ] 527 | then 528 | echo >&2 529 | echo >&2 "Provisioning: ${name} (${gvpe_sip[${name}]})" 530 | 531 | if [ "${gvpe_sip[${name}]}" = "localhost" ] 532 | then 533 | run sudo rsync -HaSPv sbin/ /usr/local/sbin/ 534 | run sudo rsync -HaSPv sbin.${gvpe_os[${name}]}/ /usr/local/sbin/ 535 | run sudo rsync -HaSPv ${confd}/ /etc/gvpe/ 536 | else 537 | run rsync -HaSPv sbin/ -e "ssh" --rsync-path="\`which sudo\` rsync" ${gvpe_sip[${name}]}:/usr/local/sbin/ 538 | run rsync -HaSPv sbin.${gvpe_os[${name}]}/ -e "ssh" --rsync-path="\`which sudo\` rsync" ${gvpe_sip[${name}]}:/usr/local/sbin/ 539 | run rsync -HaSPv ${confd}/ -e "ssh" --rsync-path="\`which sudo\` rsync" ${gvpe_sip[${name}]}:/etc/gvpe/ 540 | fi 541 | else 542 | echo >&2 "node ${name} is disabled." 543 | fi 544 | 545 | run rm -rf "${confd}" 546 | return 0 547 | } 548 | 549 | node_setup() { 550 | local name="${1}" 551 | 552 | if [ "${gvpe_os[${name}]}" != "none" -a "${gvpe_sip[${name}]}" != "none" ] 553 | then 554 | echo >&2 555 | echo >&2 "Setting up GVPE on: ${name} (${gvpe_sip[${name}]})" 556 | 557 | if [ "${gvpe_sip[${name}]}" = "localhost" ] 558 | then 559 | # it will sudo by itself if needed 560 | run /etc/gvpe/setup.sh /etc/gvpe 561 | else 562 | # it will sudo by itself if needed 563 | run ssh "${gvpe_sip[${name}]}" "/etc/gvpe/setup.sh /etc/gvpe" 564 | fi 565 | else 566 | echo >&2 "node ${name} is disabled." 
567 | fi 568 | } 569 | 570 | node_routing_order() { 571 | local name="${1}" 572 | 573 | if [ "${gvpe_os[${name}]}" != "none" -a "${gvpe_sip[${name}]}" != "none" ] 574 | then 575 | echo >&2 576 | echo >&2 "Calculating GVPE routing order on: ${name} (${gvpe_sip[${name}]})" 577 | 578 | if [ "${gvpe_sip[${name}]}" = "localhost" ] 579 | then 580 | # it will sudo by itself if needed 581 | run sudo /usr/local/sbin/gvpe-routing-order.sh 582 | else 583 | run ssh "${gvpe_sip[${name}]}" "\`which sudo\` /usr/local/sbin/gvpe-routing-order.sh" 584 | fi 585 | else 586 | echo >&2 "node ${name} is disabled." 587 | fi 588 | } 589 | 590 | configure() { 591 | echo >&2 592 | echo >&2 593 | echo >&2 " --- CONFIGURING ALL NODES ---" 594 | echo >&2 595 | 596 | local c=0 597 | while [ ${c} -lt ${max_id} ] 598 | do 599 | c=$((c + 1)) 600 | if [ -z "${gvpe_name_by_id[${c}]}" ] 601 | then 602 | echo >&2 "Missing id ${c}. Please don't remove nodes. Disable them." 603 | exit 1 604 | fi 605 | done 606 | 607 | # generate the headers of configuration files 608 | prepare_configuration 609 | 610 | # generate needed files 611 | foreach_node node_status_file 612 | foreach_node node_gvpe_conf 613 | foreach_node node_hosts 614 | foreach_node node_keys 615 | 616 | # finalize the files 617 | cat conf.d/gvpe.conf.end >>conf.d/gvpe.conf 618 | cat >>conf.d/gvpe.conf <>conf.d/hosts.real 627 | echo "# END gvpe vpn" >>conf.d/hosts.vpn 628 | 629 | 630 | echo >&2 631 | echo >&2 " --- CONFIGURED ALL NODES ---" 632 | echo >&2 633 | } 634 | 635 | provision() { 636 | echo >&2 637 | echo >&2 638 | echo >&2 " --- PROVISIONING ALL NODES ---" 639 | echo >&2 640 | 641 | # provision files 642 | foreach_node_parallel node_provision_files 643 | 644 | echo >&2 645 | echo >&2 " --- PROVISIONED ALL NODES ---" 646 | echo >&2 647 | } 648 | 649 | activate() { 650 | echo >&2 651 | echo >&2 652 | echo >&2 " --- ACTIVATING ALL NODES ---" 653 | echo >&2 654 | 655 | # setup nodes 656 | foreach_node_parallel node_setup 657 | 658 | echo 
>&2 659 | echo >&2 " --- ACTIVATED ALL NODES ---" 660 | echo >&2 661 | } 662 | 663 | save_routing_order() { 664 | echo >&2 665 | echo >&2 666 | echo >&2 " --- EVALUATING ROUTING ORDER ON ALL NODES ---" 667 | echo >&2 668 | 669 | # setup nodes 670 | foreach_node_parallel node_routing_order 671 | 672 | echo >&2 673 | echo >&2 " --- EVALUATED ROUTING ORDER ON ALL NODES ---" 674 | echo >&2 675 | } 676 | 677 | source nodes.conf 678 | exit $? 679 | -------------------------------------------------------------------------------- /gvpe/sbin.freebsd/gvpe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/netdata-demo-site/14d68e93738a2e4bfbc111331dc44f1b6953c50a/gvpe/sbin.freebsd/gvpe -------------------------------------------------------------------------------- /gvpe/sbin.freebsd/gvpectrl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/netdata-demo-site/14d68e93738a2e4bfbc111331dc44f1b6953c50a/gvpe/sbin.freebsd/gvpectrl -------------------------------------------------------------------------------- /gvpe/sbin.linux/gvpe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/netdata-demo-site/14d68e93738a2e4bfbc111331dc44f1b6953c50a/gvpe/sbin.linux/gvpe -------------------------------------------------------------------------------- /gvpe/sbin.linux/gvpectrl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/netdata-demo-site/14d68e93738a2e4bfbc111331dc44f1b6953c50a/gvpe/sbin.linux/gvpectrl -------------------------------------------------------------------------------- /gvpe/sbin/gvpe-routing-order.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export 
PATH="${PATH}:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" 4 | export LC_ALL=C 5 | 6 | set -e 7 | ME="${0}" 8 | 9 | run() { 10 | printf >&2 " > " 11 | printf >&2 "%q " "${@}" 12 | printf >&2 "\n" 13 | "${@}" 14 | return $? 15 | } 16 | 17 | cd /etc/gvpe/status 18 | 19 | if [ ! -f ./status ] 20 | then 21 | echo >&2 "Is GVPE running?" 22 | exit 1 23 | fi 24 | 25 | EVENTS=0 26 | NODES_ALL=0 27 | NODES_UP=0 28 | NODES_DOWN=0 29 | timestamp="NEVER" 30 | . ./status 31 | 32 | if [ "${timestamp}" = "NEVER" ] 33 | then 34 | echo >&2 "GVPE is not connected" 35 | exit 1 36 | fi 37 | 38 | cat <&2 "pinging ${name} (${ip})" 58 | echo "$(run ping -c 3 -n -q "${ip}" | tail -n 1 | cut -d '=' -f 2 | cut -d '/' -f 2)|${name}" 59 | done ../routing.conf.new 70 | 71 | run cp ../routing.conf ../routing.conf.old 72 | run mv ../routing.conf.new ../routing.conf 73 | -------------------------------------------------------------------------------- /gvpe/sbin/gvpe-status.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export PATH="${PATH}:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" 4 | 5 | set -e 6 | ME="${0}" 7 | 8 | cd /etc/gvpe/status 9 | 10 | if [ ! -f ./status ] 11 | then 12 | echo >&2 "Is GVPE running?" 13 | exit 1 14 | fi 15 | 16 | EVENTS=0 17 | NODES_ALL=0 18 | NODES_UP=0 19 | NODES_DOWN=0 20 | NODES_DISABLED=0 21 | timestamp="NEVER" 22 | . ./status 23 | 24 | if [ "${timestamp}" = "NEVER" ] 25 | then 26 | echo >&2 "GVPE is not connected" 27 | exit 1 28 | fi 29 | 30 | cat <&2 "File '${x}' missing!" && continue 48 | 49 | . ./${x} 50 | 51 | remote="${rip}" 52 | if [ "${status}" = "up" ] 53 | then 54 | remote="${si}" 55 | [ ! 
-z "${pip}" -a "${pip}" != "dynamic" -a "${rip}" != "${pip}" ] && status="routed" 56 | else 57 | remote="${pip}" 58 | fi 59 | 60 | printf "%3u %-25s %-15s %-25s %-6s %-20s\n" \ 61 | "$((nodeid))" "${name}" "${ip}" "${remote}" "${status}" \ 62 | "$(date -r "./${x}" "+%Y-%m-%d %H:%M:%S")" 63 | done &2 "Unknown action '${ACTION}'" 49 | exit 1 50 | ;; 51 | esac 52 | 53 | -------------------------------------------------------------------------------- /install-all-firehol.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2016-2017 Costa Tsaousis 4 | # GPL v3+ 5 | # 6 | # This script downloads and installs all firehol tools: 7 | # 8 | # - iprange 9 | # - firehol, fireqos, link-balancer, vnetbuild, update-ipsets 10 | # - netdata 11 | # 12 | # Their sources will be placed in /usr/src in a subdir each. 13 | # 14 | # All tools are installed system-wide. 15 | # The script can also update them (just run it again). 16 | 17 | for x in iprange firehol netdata 18 | do 19 | if [ ! -d /usr/src/${x}.git ] 20 | then 21 | echo "Downloading (git clone) ${x}..." 22 | git clone https://github.com/firehol/${x}.git /usr/src/${x}.git || exit 1 23 | else 24 | echo "Downloading (git pull) ${x}..." 25 | cd /usr/src/${x}.git || exit 1 26 | git pull || exit 1 27 | fi 28 | done 29 | 30 | echo 31 | echo "Building iprange..." 32 | cd /usr/src/iprange.git || exit 1 33 | ./autogen.sh || exit 1 34 | ./configure --prefix=/usr CFLAGS="-O2" --disable-man || exit 1 35 | make clean 36 | make || exit 1 37 | make install || exit 1 38 | 39 | echo 40 | echo "Building firehol..." 41 | cd /usr/src/firehol.git || exit 1 42 | ./autogen.sh || exit 1 43 | ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-man --disable-doc || exit 1 44 | make clean 45 | make || exit 1 46 | make install || exit 1 47 | 48 | echo 49 | echo "Building netdata..." 
50 | cd /usr/src/netdata.git || exit 1 51 | ./netdata-installer.sh --dont-wait 52 | 53 | exit $? 54 | -------------------------------------------------------------------------------- /install-required-packages.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export PATH="${PATH}:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin" 4 | export LC_ALL=C 5 | 6 | # Be nice on production environments 7 | renice 19 $$ >/dev/null 2>/dev/null 8 | 9 | ME="${0}" 10 | 11 | if [ "${BASH_VERSINFO[0]}" -lt "4" ] 12 | then 13 | echo >&2 "Sorry! This script needs BASH version 4+, but you have BASH version ${BASH_VERSION}" 14 | exit 1 15 | fi 16 | 17 | # These options control which packages we are going to install 18 | # They can be pre-set, but also can be controlled with command line options 19 | PACKAGES_NETDATA=${PACKAGES_NETDATA-0} 20 | PACKAGES_NETDATA_NODEJS=${PACKAGES_NETDATA_NODEJS-0} 21 | PACKAGES_NETDATA_PYTHON=${PACKAGES_NETDATA_PYTHON-0} 22 | PACKAGES_NETDATA_PYTHON3=${PACKAGES_NETDATA_PYTHON3-0} 23 | PACKAGES_NETDATA_PYTHON_MYSQL=${PACKAGES_NETDATA_PYTHON_MYSQL-0} 24 | PACKAGES_NETDATA_PYTHON_POSTGRES=${PACKAGES_NETDATA_PYTHON_POSTGRES-0} 25 | PACKAGES_NETDATA_PYTHON_MONGO=${PACKAGES_NETDATA_PYTHON_MONGO-0} 26 | PACKAGES_DEBUG=${PACKAGES_DEBUG-0} 27 | PACKAGES_IPRANGE=${PACKAGES_IPRANGE-0} 28 | PACKAGES_FIREHOL=${PACKAGES_FIREHOL-0} 29 | PACKAGES_FIREQOS=${PACKAGES_FIREQOS-0} 30 | PACKAGES_UPDATE_IPSETS=${PACKAGES_UPDATE_IPSETS-0} 31 | PACKAGES_NETDATA_DEMO_SITE=${PACKAGES_NETDATA_DEMO_SITE-0} 32 | PACKAGES_NETDATA_SENSORS=${PACKAGES_NETDATA_SENSORS-0} 33 | PACKAGES_NETDATA_DATABASE=${PACKAGES_NETDATA_DATABASE-0} 34 | 35 | # needed commands 36 | lsb_release=$(which lsb_release 2>/dev/null || command -v lsb_release 2>/dev/null) 37 | 38 | # Check which package managers are available 39 | apk=$(which apk 2>/dev/null || command -v apk 2>/dev/null) 40 | apt_get=$(which apt-get 2>/dev/null || 
command -v apt-get 2>/dev/null) 41 | dnf=$(which dnf 2>/dev/null || command -v dnf 2>/dev/null) 42 | emerge=$(which emerge 2>/dev/null || command -v emerge 2>/dev/null) 43 | equo=$(which equo 2>/dev/null || command -v equo 2>/dev/null) 44 | pacman=$(which pacman 2>/dev/null || command -v pacman 2>/dev/null) 45 | yum=$(which yum 2>/dev/null || command -v yum 2>/dev/null) 46 | zypper=$(which zypper 2>/dev/null || command -v zypper 2>/dev/null) 47 | 48 | distribution= 49 | version= 50 | codename= 51 | package_installer= 52 | tree= 53 | detection= 54 | NAME= 55 | ID= 56 | ID_LIKE= 57 | VERSION= 58 | VERSION_ID= 59 | 60 | usage() { 61 | cat <&2 "Loading ${file} ..." 143 | 144 | 145 | x="$(cat "${file}" | grep -v "^$" | head -n 1)" 146 | 147 | if [[ "${x}" =~ ^.*[[:space:]]+Linux[[:space:]]+release[[:space:]]+.*[[:space:]]+(.*)[[:space:]]*$ ]] 148 | then 149 | eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+Linux[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]\+(\(.*\))[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"\nDISTRIB_CODENAME=\"\3\"|g" | grep "^DISTRIB")" 150 | elif [[ "${x}" =~ ^.*[[:space:]]+Linux[[:space:]]+release[[:space:]]+.*[[:space:]]+$ ]] 151 | then 152 | eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+Linux[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"|g" | grep "^DISTRIB")" 153 | elif [[ "${x}" =~ ^.*[[:space:]]+release[[:space:]]+.*[[:space:]]+(.*)[[:space:]]*$ ]] 154 | then 155 | eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]\+(\(.*\))[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"\nDISTRIB_CODENAME=\"\3\"|g" | grep "^DISTRIB")" 156 | elif [[ "${x}" =~ ^.*[[:space:]]+release[[:space:]]+.*[[:space:]]+$ ]] 157 | then 158 | eval "$(echo "${x}" | sed "s|^\(.*\)[[:space:]]\+release[[:space:]]\+\(.*\)[[:space:]]*$|DISTRIB_ID=\"\1\"\nDISTRIB_RELEASE=\"\2\"|g" | grep "^DISTRIB")" 159 | fi 160 | 161 | distribution="${DISTRIB_ID}" 162 | 
version="${DISTRIB_RELEASE}" 163 | codename="${DISTRIB_CODENAME}" 164 | 165 | [ -z "${distribution}" ] && echo >&2 "Cannot parse this lsb-release: ${x}" && return 1 166 | detection="${file}" 167 | return 0 168 | } 169 | 170 | get_os_release() { 171 | # Loads the /etc/os-release file 172 | # Only the required fields are loaded 173 | # 174 | # If it manages to load /etc/os-release, it returns 0 175 | # otherwise it returns 1 176 | # 177 | # It searches the ID_LIKE field for a compatible distribution 178 | 179 | local x 180 | if [ -f "/etc/os-release" ] 181 | then 182 | echo >&2 "Loading /etc/os-release ..." 183 | 184 | eval "$(cat /etc/os-release | grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=")" 185 | for x in "${ID}" ${ID_LIKE} 186 | do 187 | case "${x,,}" in 188 | alpine|arch|centos|debian|fedora|gentoo|sabayon|rhel|ubuntu|suse|opensuse-leap|sles) 189 | distribution="${x}" 190 | version="${VERSION_ID}" 191 | codename="${VERSION}" 192 | detection="/etc/os-release" 193 | break 194 | ;; 195 | *) 196 | echo >&2 "Unknown distribution ID: ${x}" 197 | ;; 198 | esac 199 | done 200 | [ -z "${distribution}" ] && echo >&2 "Cannot find valid distribution in: ${ID} ${ID_LIKE}" && return 1 201 | else 202 | echo >&2 "Cannot find /etc/os-release" && return 1 203 | fi 204 | 205 | [ -z "${distribution}" ] && return 1 206 | return 0 207 | } 208 | 209 | get_lsb_release() { 210 | # Loads the /etc/lsb-release file 211 | # If it fails, it attempts to run the command: lsb_release -a 212 | # and parse its output 213 | # 214 | # If it manages to find the lsb-release, it returns 0 215 | # otherwise it returns 1 216 | 217 | if [ -f "/etc/lsb-release" ] 218 | then 219 | echo >&2 "Loading /etc/lsb-release ..." 
220 | local DISTRIB_ID= ISTRIB_RELEASE= DISTRIB_CODENAME= DISTRIB_DESCRIPTION= 221 | eval "$(cat /etc/lsb-release | grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=")" 222 | distribution="${DISTRIB_ID}" 223 | version="${DISTRIB_RELEASE}" 224 | codename="${DISTRIB_CODENAME}" 225 | detection="/etc/lsb-release" 226 | fi 227 | 228 | if [ -z "${distribution}" -a ! -z "${lsb_release}" ] 229 | then 230 | echo >&2 "Cannot find distribution with /etc/lsb-release" 231 | echo >&2 "Running command: lsb_release ..." 232 | eval "declare -A release=( $(lsb_release -a 2>/dev/null | sed -e "s|^\(.*\):[[:space:]]*\(.*\)$|[\1]=\"\2\"|g") )" 233 | distribution="${release[Distributor ID]}" 234 | version="${release[Release]}" 235 | codename="${release[Codename]}" 236 | detection="lsb_release" 237 | fi 238 | 239 | [ -z "${distribution}" ] && echo >&2 "Cannot find valid distribution with lsb-release" && return 1 240 | return 0 241 | } 242 | 243 | find_etc_any_release() { 244 | # Check for any of the known /etc/x-release files 245 | # If it finds one, it loads it and returns 0 246 | # otherwise it returns 1 247 | 248 | if [ -f "/etc/arch-release" ] 249 | then 250 | release2lsb_release "/etc/arch-release" && return 0 251 | fi 252 | 253 | if [ -f "/etc/centos-release" ] 254 | then 255 | release2lsb_release "/etc/centos-release" && return 0 256 | fi 257 | 258 | if [ -f "/etc/redhat-release" ] 259 | then 260 | release2lsb_release "/etc/redhat-release" && return 0 261 | fi 262 | 263 | if [ -f "/etc/SuSe-release" ] 264 | then 265 | release2lsb_release "/etc/SuSe-release" && return 0 266 | fi 267 | 268 | return 1 269 | } 270 | 271 | autodetect_distribution() { 272 | # autodetection of distribution 273 | get_os_release || get_lsb_release || find_etc_any_release 274 | } 275 | 276 | user_picks_distribution() { 277 | # let the user pick a distribution 278 | 279 | echo >&2 280 | echo >&2 "I NEED YOUR HELP" 281 | echo >&2 "It seems I cannot detect your system automatically." 
282 | if [ -z "${equo}" -a -z "${emerge}" -a -z "${apt_get}" -a -z "${yum}" -a -z "${dnf}" -a -z "${pacman}" -a -z "${apk}" ] 283 | then 284 | echo >&2 "And it seems I cannot find a known package manager in this system." 285 | echo >&2 "Please open a github issue to help us support your system too." 286 | exit 1 287 | fi 288 | 289 | local opts= 290 | echo >&2 "I found though that the following installers are available:" 291 | echo >&2 292 | [ ! -z "${apt_get}" ] && echo >&2 " - Debian/Ubuntu based (installer is: apt-get)" && opts="apt-get ${opts}" 293 | [ ! -z "${yum}" ] && echo >&2 " - Redhat/Fedora/Centos based (installer is: yum)" && opts="yum ${opts}" 294 | [ ! -z "${dnf}" ] && echo >&2 " - Redhat/Fedora/Centos based (installer is: dnf)" && opts="dnf ${opts}" 295 | [ ! -z "${zypper}" ] && echo >&2 " - SuSe based (installer is: zypper)" && opts="zypper ${opts}" 296 | [ ! -z "${pacman}" ] && echo >&2 " - Arch Linux based (installer is: pacman)" && opts="pacman ${opts}" 297 | [ ! -z "${emerge}" ] && echo >&2 " - Gentoo based (installer is: emerge)" && opts="emerge ${opts}" 298 | [ ! -z "${equo}" ] && echo >&2 " - Sabayon based (installer is: equo)" && opts="equo ${opts}" 299 | [ ! -z "${apk}" ] && echo >&2 " - Alpine Linux based (installer is: apk)" && opts="apk ${opts}" 300 | echo >&2 301 | 302 | REPLY= 303 | while [ -z "${REPLY}" ] 304 | do 305 | echo "To proceed please write one of these:" 306 | echo "${opts}" | sed -e 's/ /, /g' 307 | read -p ">" REPLY 308 | [ $? -ne 0 ] && REPLY= && continue 309 | 310 | if [ "${REPLY}" = "yum" -a -z "${distribution}" ] 311 | then 312 | REPLY= 313 | while [ -z "${REPLY}" ] 314 | do 315 | read -p "yum in centos, rhel or fedora? > " 316 | [ $? -ne 0 ] && continue 317 | 318 | case "${REPLY,,}" in 319 | fedora|rhel) 320 | distribution="rhel" 321 | ;; 322 | centos) 323 | distribution="centos" 324 | ;; 325 | *) 326 | echo >&2 "Please enter 'centos', 'fedora' or 'rhel'." 
327 | REPLY= 328 | ;; 329 | esac 330 | done 331 | REPLY="yum" 332 | fi 333 | check_package_manager "${REPLY}" || REPLY= 334 | done 335 | } 336 | 337 | detect_package_manager_from_distribution() { 338 | case "${1,,}" in 339 | arch*|manjaro*) 340 | package_installer="install_pacman" 341 | tree="arch" 342 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${pacman}" ] 343 | then 344 | echo >&2 "command 'pacman' is required to install packages on a '${distribution} ${version}' system." 345 | exit 1 346 | fi 347 | ;; 348 | 349 | sabayon*) 350 | package_installer="install_equo" 351 | tree="sabayon" 352 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${equo}" ] 353 | then 354 | echo >&2 "command 'equo' is required to install packages on a '${distribution} ${version}' system." 355 | # Maybe offer to fall back on emerge? Both installers exist in Sabayon... 356 | exit 1 357 | fi 358 | ;; 359 | 360 | alpine*) 361 | package_installer="install_apk" 362 | tree="alpine" 363 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${apk}" ] 364 | then 365 | echo >&2 "command 'apk' is required to install packages on a '${distribution} ${version}' system." 366 | exit 1 367 | fi 368 | ;; 369 | 370 | gentoo*) 371 | package_installer="install_emerge" 372 | tree="gentoo" 373 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${emerge}" ] 374 | then 375 | echo >&2 "command 'emerge' is required to install packages on a '${distribution} ${version}' system." 376 | exit 1 377 | fi 378 | ;; 379 | 380 | debian*|ubuntu*) 381 | package_installer="install_apt_get" 382 | tree="debian" 383 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${apt_get}" ] 384 | then 385 | echo >&2 "command 'apt-get' is required to install packages on a '${distribution} ${version}' system." 386 | exit 1 387 | fi 388 | ;; 389 | 390 | centos*|clearos*) 391 | echo >&2 "You should have EPEL enabled to install all the prerequisites." 
392 | echo >&2 "Check: http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/" 393 | package_installer="install_yum" 394 | tree="centos" 395 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${yum}" ] 396 | then 397 | echo >&2 "command 'yum' is required to install packages on a '${distribution} ${version}' system." 398 | exit 1 399 | fi 400 | ;; 401 | 402 | fedora*|redhat*|red\ hat*|rhel*) 403 | package_installer= 404 | tree="rhel" 405 | [ ! -z "${yum}" ] && package_installer="install_yum" 406 | [ ! -z "${dnf}" ] && package_installer="install_dnf" 407 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${package_installer}" ] 408 | then 409 | echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." 410 | exit 1 411 | fi 412 | ;; 413 | 414 | suse*|opensuse*|sles*) 415 | package_installer="install_zypper" 416 | tree="suse" 417 | if [ ${IGNORE_INSTALLED} -eq 0 -a -z "${zypper}" ] 418 | then 419 | echo >&2 "command 'zypper' is required to install packages on a '${distribution} ${version}' system." 420 | exit 1 421 | fi 422 | ;; 423 | 424 | *) 425 | # oops! unknown system 426 | user_picks_distribution 427 | ;; 428 | esac 429 | } 430 | 431 | check_package_manager() { 432 | # This is called only when the user is selecting a package manager 433 | # It is used to verify the user selection is right 434 | 435 | echo >&2 "Checking package manager: ${1}" 436 | 437 | case "${1}" in 438 | apt-get) 439 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${apt_get}" ] && echo >&2 "${1} is not available." && return 1 440 | package_installer="install_apt_get" 441 | tree="debian" 442 | detection="user-input" 443 | return 0 444 | ;; 445 | 446 | dnf) 447 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${dnf}" ] && echo >&2 "${1} is not available." 
&& return 1 448 | package_installer="install_dnf" 449 | tree="rhel" 450 | detection="user-input" 451 | return 0 452 | ;; 453 | 454 | apk) 455 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${apk}" ] && echo >&2 "${1} is not available." && return 1 456 | package_installer="install_apk" 457 | tree="alpine" 458 | detection="user-input" 459 | return 0 460 | ;; 461 | 462 | equo) 463 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${equo}" ] && echo >&2 "${1} is not available." && return 1 464 | package_installer="install_equo" 465 | tree="sabayon" 466 | detection="user-input" 467 | return 0 468 | ;; 469 | 470 | emerge) 471 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${emerge}" ] && echo >&2 "${1} is not available." && return 1 472 | package_installer="install_emerge" 473 | tree="gentoo" 474 | detection="user-input" 475 | return 0 476 | ;; 477 | 478 | pacman) 479 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${pacman}" ] && echo >&2 "${1} is not available." && return 1 480 | package_installer="install_pacman" 481 | tree="arch" 482 | detection="user-input" 483 | 484 | return 0 485 | ;; 486 | 487 | zypper) 488 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${zypper}" ] && echo >&2 "${1} is not available." && return 1 489 | package_installer="install_zypper" 490 | tree="suse" 491 | detection="user-input" 492 | return 0 493 | ;; 494 | 495 | yum) 496 | [ ${IGNORE_INSTALLED} -eq 0 -a -z "${yum}" ] && echo >&2 "${1} is not available." && return 1 497 | package_installer="install_yum" 498 | if [ "${distribution}" = "centos" ] 499 | then 500 | tree="centos" 501 | else 502 | tree="rhel" 503 | fi 504 | detection="user-input" 505 | return 0 506 | ;; 507 | 508 | *) 509 | echo >&2 "Invalid package manager: '${1}'." 
510 | return 1 511 | ;; 512 | esac 513 | } 514 | 515 | require_cmd() { 516 | # check if any of the commands given as argument 517 | # are present on this system 518 | # If any of them is available, it returns 0 519 | # otherwise 1 520 | 521 | [ ${IGNORE_INSTALLED} -eq 1 ] && return 1 522 | 523 | local wanted found 524 | for wanted in "${@}" 525 | do 526 | found="$(which "${wanted}" 2>/dev/null)" 527 | [ -z "${found}" ] && found="$(command -v "${wanted}" 2>/dev/null)" 528 | [ ! -z "${found}" -a -x "${found}" ] && return 0 529 | done 530 | 531 | return 1 532 | } 533 | 534 | declare -A pkg_find=( 535 | ['fedora']="findutils" 536 | ['default']="WARNING|" 537 | ) 538 | 539 | declare -A pkg_distro_sdk=( 540 | ['alpine']="alpine-sdk" 541 | ['default']="NOTREQUIRED" 542 | ) 543 | 544 | declare -A pkg_autoconf=( 545 | ['gentoo']="sys-devel/autoconf" 546 | ['default']="autoconf" 547 | ) 548 | 549 | # required to compile netdata with --enable-sse 550 | # https://github.com/firehol/netdata/pull/450 551 | declare -A pkg_autoconf_archive=( 552 | ['gentoo']="sys-devel/autoconf-archive" 553 | ['alpine']="WARNING|" 554 | ['default']="autoconf-archive" 555 | 556 | # exceptions 557 | ['centos-6']="WARNING|" 558 | ['rhel-6']="WARNING|" 559 | ['rhel-7']="WARNING|" 560 | ) 561 | 562 | declare -A pkg_autogen=( 563 | ['gentoo']="sys-devel/autogen" 564 | ['alpine']="WARNING|" 565 | ['default']="autogen" 566 | 567 | # exceptions 568 | ['centos-6']="WARNING|" 569 | ['rhel-6']="WARNING|" 570 | ['ubuntu-18']="WARNING|" 571 | ) 572 | 573 | declare -A pkg_automake=( 574 | ['gentoo']="sys-devel/automake" 575 | ['default']="automake" 576 | ) 577 | 578 | declare -A pkg_bridge_utils=( 579 | ['gentoo']="net-misc/bridge-utils" 580 | ['default']="bridge-utils" 581 | ) 582 | 583 | declare -A pkg_chrony=( 584 | ['default']="chrony" 585 | ) 586 | 587 | declare -A pkg_curl=( 588 | ['gentoo']="net-misc/curl" 589 | ['sabayon']="net-misc/curl" 590 | ['default']="curl" 591 | ) 592 | 593 | declare -A pkg_gzip=( 
594 | ['default']="gzip" 595 | ) 596 | 597 | declare -A pkg_tar=( 598 | ['default']="tar" 599 | ) 600 | 601 | declare -A pkg_git=( 602 | ['gentoo']="dev-vcs/git" 603 | ['default']="git" 604 | ) 605 | 606 | declare -A pkg_gcc=( 607 | ['gentoo']="sys-devel/gcc" 608 | ['default']="gcc" 609 | ) 610 | 611 | declare -A pkg_gdb=( 612 | ['gentoo']="sys-devel/gdb" 613 | ['default']="gdb" 614 | ) 615 | 616 | declare -A pkg_iotop=( 617 | ['default']="iotop" 618 | ) 619 | 620 | declare -A pkg_iproute2=( 621 | ['alpine']="iproute2" 622 | ['debian']="iproute2" 623 | ['gentoo']="sys-apps/iproute2" 624 | ['sabayon']="sys-apps/iproute2" 625 | ['default']="iproute" 626 | 627 | # exceptions 628 | ['ubuntu-12.04']="iproute" 629 | ) 630 | 631 | declare -A pkg_ipset=( 632 | ['gentoo']="net-firewall/ipset" 633 | ['default']="ipset" 634 | ) 635 | 636 | declare -A pkg_jq=( 637 | ['gentoo']="app-misc/jq" 638 | ['default']="jq" 639 | ) 640 | 641 | declare -A pkg_iptables=( 642 | ['gentoo']="net-firewall/iptables" 643 | ['default']="iptables" 644 | ) 645 | 646 | declare -A pkg_libz_dev=( 647 | ['alpine']="zlib-dev" 648 | ['arch']="zlib" 649 | ['centos']="zlib-devel" 650 | ['debian']="zlib1g-dev" 651 | ['gentoo']="sys-libs/zlib" 652 | ['sabayon']="sys-libs/zlib" 653 | ['rhel']="zlib-devel" 654 | ['suse']="zlib-devel" 655 | ['default']="" 656 | ) 657 | 658 | declare -A pkg_libuuid_dev=( 659 | ['alpine']="util-linux-dev" 660 | ['arch']="util-linux" 661 | ['centos']="libuuid-devel" 662 | ['debian']="uuid-dev" 663 | ['gentoo']="sys-apps/util-linux" 664 | ['sabayon']="sys-apps/util-linux" 665 | ['rhel']="libuuid-devel" 666 | ['suse']="libuuid-devel" 667 | ['default']="" 668 | ) 669 | 670 | declare -A pkg_libmnl_dev=( 671 | ['alpine']="libmnl-dev" 672 | ['arch']="libmnl" 673 | ['centos']="libmnl-devel" 674 | ['debian']="libmnl-dev" 675 | ['gentoo']="net-libs/libmnl" 676 | ['sabayon']="net-libs/libmnl" 677 | ['rhel']="libmnl-devel" 678 | ['suse']="libmnl-devel" 679 | ['default']="" 680 | ) 681 | 682 
| declare -A pkg_lm_sensors=( 683 | ['alpine']="lm_sensors" 684 | ['arch']="lm_sensors" 685 | ['centos']="lm_sensors" 686 | ['debian']="lm-sensors" 687 | ['gentoo']="sys-apps/lm_sensors" 688 | ['sabayon']="sys-apps/lm_sensors" 689 | ['rhel']="lm_sensors" 690 | ['suse']="sensors" 691 | ['default']="lm_sensors" 692 | ) 693 | 694 | declare -A pkg_logwatch=( 695 | ['default']="logwatch" 696 | ) 697 | 698 | declare -A pkg_lxc=( 699 | ['default']="lxc" 700 | ) 701 | 702 | declare -A pkg_mailutils=( 703 | ['default']="mailutils" 704 | ) 705 | 706 | declare -A pkg_make=( 707 | ['gentoo']="sys-devel/make" 708 | ['default']="make" 709 | ) 710 | 711 | declare -A pkg_netcat=( 712 | ['alpine']="netcat-openbsd" 713 | ['arch']="netcat" 714 | ['centos']="nmap-ncat" 715 | ['debian']="netcat" 716 | ['gentoo']="net-analyzer/netcat" 717 | ['sabayon']="net-analyzer/gnu-netcat" 718 | ['rhel']="nmap-ncat" 719 | ['suse']="netcat-openbsd" 720 | ['default']="netcat" 721 | 722 | # exceptions 723 | ['centos-6']="nc" 724 | ['rhel-6']="nc" 725 | ) 726 | 727 | declare -A pkg_nginx=( 728 | ['gentoo']="www-servers/nginx" 729 | ['default']="nginx" 730 | ) 731 | 732 | declare -A pkg_nodejs=( 733 | ['gentoo']="net-libs/nodejs" 734 | ['default']="nodejs" 735 | 736 | # exceptions 737 | ['rhel-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/" 738 | ['rhel-7']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/" 739 | ['centos-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/" 740 | ['debian-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/" 741 | ['debian-7']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/" 742 | ) 743 | 744 | declare -A pkg_postfix=( 745 | ['default']="postfix" 746 | ) 747 | 748 | declare -A pkg_pkg_config=( 749 | ['alpine']="pkgconfig" 750 | ['arch']="pkgconfig" 751 | ['centos']="pkgconfig" 752 | 
['debian']="pkg-config" 753 | ['gentoo']="dev-util/pkgconfig" 754 | ['sabayon']="virtual/pkgconfig" 755 | ['rhel']="pkgconfig" 756 | ['suse']="pkg-config" 757 | ['default']="pkg-config" 758 | ) 759 | 760 | declare -A pkg_python=( 761 | ['gentoo']="dev-lang/python" 762 | ['sabayon']="dev-lang/python:2.7" 763 | ['default']="python" 764 | 765 | # Exceptions 766 | ['centos-8']="python2" 767 | ) 768 | 769 | declare -A pkg_python_mysqldb=( 770 | ['alpine']="py-mysqldb" 771 | ['arch']="mysql-python" 772 | ['centos']="MySQL-python" 773 | ['debian']="python-mysqldb" 774 | ['gentoo']="dev-python/mysqlclient" 775 | ['sabayon']="dev-python/mysqlclient" 776 | ['rhel']="MySQL-python" 777 | ['suse']="python-PyMySQL" 778 | ['default']="python-mysql" 779 | 780 | # exceptions 781 | ['fedora-24']="python2-mysql" 782 | ) 783 | 784 | declare -A pkg_python3_mysqldb=( 785 | ['alpine']="WARNING|" 786 | ['arch']="WARNING|" 787 | ['centos']="WARNING|" 788 | ['debian']="python3-mysqldb" 789 | ['gentoo']="dev-python/mysqlclient" 790 | ['sabayon']="dev-python/mysqlclient" 791 | ['rhel']="WARNING|" 792 | ['suse']="WARNING|" 793 | ['default']="WARNING|" 794 | 795 | # exceptions 796 | ['debian-6']="WARNING|" 797 | ['debian-7']="WARNING|" 798 | ['debian-8']="WARNING|" 799 | ['ubuntu-12.04']="WARNING|" 800 | ['ubuntu-12.10']="WARNING|" 801 | ['ubuntu-13.04']="WARNING|" 802 | ['ubuntu-13.10']="WARNING|" 803 | ['ubuntu-14.04']="WARNING|" 804 | ['ubuntu-14.10']="WARNING|" 805 | ['ubuntu-15.04']="WARNING|" 806 | ['ubuntu-15.10']="WARNING|" 807 | ) 808 | 809 | declare -A pkg_python_psycopg2=( 810 | ['alpine']="py-psycopg2" 811 | ['arch']="python2-psycopg2" 812 | ['centos']="python-psycopg2" 813 | ['debian']="python-psycopg2" 814 | ['gentoo']="dev-python/psycopg" 815 | ['sabayon']="dev-python/psycopg:2" 816 | ['rhel']="python-psycopg2" 817 | ['suse']="python-psycopg2" 818 | ['default']="python-psycopg2" 819 | ) 820 | 821 | declare -A pkg_python3_psycopg2=( 822 | ['alpine']="py3-psycopg2" 823 | 
['arch']="python-psycopg2" 824 | ['centos']="WARNING|" 825 | ['debian']="WARNING|" 826 | ['gentoo']="dev-python/psycopg" 827 | ['sabayon']="dev-python/psycopg:2" 828 | ['rhel']="WARNING|" 829 | ['suse']="WARNING|" 830 | ['default']="WARNING|" 831 | ) 832 | 833 | declare -A pkg_python_pip=( 834 | ['alpine']="py-pip" 835 | ['gentoo']="dev-python/pip" 836 | ['sabayon']="dev-python/pip" 837 | ['default']="python-pip" 838 | ) 839 | 840 | declare -A pkg_python3_pip=( 841 | ['alpine']="py3-pip" 842 | ['arch']="python-pip" 843 | ['centos']="WARNING|" 844 | ['gentoo']="dev-python/pip" 845 | ['sabayon']="dev-python/pip" 846 | ['rhel']="WARNING|" 847 | ['default']="python3-pip" 848 | ) 849 | 850 | declare -A pkg_python_pymongo=( 851 | ['alpine']="WARNING|" 852 | ['arch']="python2-pymongo" 853 | ['centos']="WARNING|" 854 | ['debian']="python-pymongo" 855 | ['gentoo']="dev-python/pymongo" 856 | ['suse']="python-pymongo" 857 | ['default']="python-pymongo" 858 | ) 859 | 860 | declare -A pkg_python3_pymongo=( 861 | ['alpine']="WARNING|" 862 | ['arch']="python-pymongo" 863 | ['centos']="WARNING|" 864 | ['debian']="python3-pymongo" 865 | ['gentoo']="dev-python/pymongo" 866 | ['suse']="python3-pymongo" 867 | ['default']="python3-pymongo" 868 | ) 869 | 870 | declare -A pkg_python_requests=( 871 | ['alpine']="py-requests" 872 | ['arch']="python2-requests" 873 | ['centos']="python-requests" 874 | ['debian']="python-requests" 875 | ['gentoo']="dev-python/requests" 876 | ['sabayon']="dev-python/requests" 877 | ['rhel']="python-requests" 878 | ['suse']="python-requests" 879 | ['default']="python-requests" 880 | 881 | ['alpine-3.1.4']="WARNING|" 882 | ['alpine-3.2.3']="WARNING|" 883 | ) 884 | 885 | declare -A pkg_python3_requests=( 886 | ['alpine']="py3-requests" 887 | ['arch']="python-requests" 888 | ['centos']="WARNING|" 889 | ['debian']="WARNING|" 890 | ['gentoo']="dev-python/requests" 891 | ['sabayon']="dev-python/requests" 892 | ['rhel']="WARNING|" 893 | ['suse']="WARNING|" 894 | 
['default']="WARNING|" 895 | ) 896 | 897 | declare -A pkg_lz4=( 898 | ['alpine']="lz4-dev" 899 | ['debian']="liblz4-dev" 900 | ['ubuntu']="liblz4-dev" 901 | ['suse']="liblz4-devel" 902 | ['gentoo']="app-arch/lz4" 903 | ['default']="lz4-devel" 904 | ) 905 | 906 | declare -A pkg_libuv=( 907 | ['alpine']="libuv-dev" 908 | ['debian']="libuv1-dev" 909 | ['ubuntu']="libuv1-dev" 910 | ['gentoo']="dev-libs/libuv" 911 | ['arch']="libuv" 912 | ['default']="libuv-devel" 913 | ) 914 | 915 | declare -A pkg_openssl=( 916 | ['alpine']="openssl-dev" 917 | ['debian']="libssl-dev" 918 | ['ubuntu']="libssl-dev" 919 | ['suse']="libopenssl-devel" 920 | ['default']="openssl-devel" 921 | ) 922 | 923 | declare -A pkg_judy=( 924 | ['alpine']="WARNING|" # TODO - need to add code to download and install judy for alpine case 925 | ['debian']="libjudy-dev" 926 | ['ubuntu']="libjudy-dev" 927 | ['suse']="judy-devel" 928 | ['gentoo']="dev-libs/judy" 929 | ['arch']="judy" 930 | ['default']="Judy-devel" 931 | ) 932 | 933 | declare -A pkg_python3=( 934 | ['gentoo']="dev-lang/python" 935 | ['sabayon']="dev-lang/python:3.4" 936 | ['default']="python3" 937 | 938 | # exceptions 939 | ['centos-6']="WARNING|" 940 | ) 941 | 942 | declare -A pkg_screen=( 943 | ['gentoo']="app-misc/screen" 944 | ['sabayon']="app-misc/screen" 945 | ['default']="screen" 946 | ) 947 | 948 | declare -A pkg_sudo=( 949 | ['default']="sudo" 950 | ) 951 | 952 | declare -A pkg_sysstat=( 953 | ['default']="sysstat" 954 | ) 955 | 956 | declare -A pkg_tcpdump=( 957 | ['gentoo']="net-analyzer/tcpdump" 958 | ['default']="tcpdump" 959 | ) 960 | 961 | declare -A pkg_traceroute=( 962 | ['alpine']=" " 963 | ['gentoo']="net-analyzer/traceroute" 964 | ['default']="traceroute" 965 | ) 966 | 967 | declare -A pkg_valgrind=( 968 | ['gentoo']="dev-util/valgrind" 969 | ['default']="valgrind" 970 | ) 971 | 972 | declare -A pkg_ulogd=( 973 | ['centos']="WARNING|" 974 | ['rhel']="WARNING|" 975 | ['gentoo']="app-admin/ulogd" 976 | ['default']="ulogd2" 
977 | ) 978 | 979 | declare -A pkg_unzip=( 980 | ['gentoo']="app-arch/unzip" 981 | ['default']="unzip" 982 | ) 983 | 984 | declare -A pkg_zip=( 985 | ['gentoo']="app-arch/zip" 986 | ['default']="zip" 987 | ) 988 | 989 | validate_installed_package() { 990 | validate_${package_installer} "${p}" 991 | } 992 | 993 | suitable_package() { 994 | local package="${1//-/_}" p= v="${version//.*/}" 995 | 996 | # echo >&2 "Searching for ${package}..." 997 | 998 | eval "p=\${pkg_${package}['${distribution,,}-${version,,}']}" 999 | [ -z "${p}" ] && eval "p=\${pkg_${package}['${distribution,,}-${v,,}']}" 1000 | [ -z "${p}" ] && eval "p=\${pkg_${package}['${distribution,,}']}" 1001 | [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}-${version}']}" 1002 | [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}-${v}']}" 1003 | [ -z "${p}" ] && eval "p=\${pkg_${package}['${tree}']}" 1004 | [ -z "${p}" ] && eval "p=\${pkg_${package}['default']}" 1005 | 1006 | if [[ "${p/|*}" =~ ^(ERROR|WARNING|INFO)$ ]] 1007 | then 1008 | echo >&2 "${p/|*/}" 1009 | echo >&2 "package ${1} is not available in this system." 1010 | if [ -z "${p/*|/}" ] 1011 | then 1012 | echo >&2 "You may try to install without it." 1013 | else 1014 | echo >&2 "${p/*|/}" 1015 | fi 1016 | echo >&2 1017 | return 1 1018 | elif [ "${p}" = "NOTREQUIRED" ] 1019 | then 1020 | return 0 1021 | elif [ -z "${p}" ] 1022 | then 1023 | echo >&2 "WARNING" 1024 | echo >&2 "package ${1} is not availabe in this system." 
1025 | echo >&2 1026 | return 1 1027 | else 1028 | if [ ${IGNORE_INSTALLED} -eq 0 ] 1029 | then 1030 | validate_installed_package "${p}" 1031 | else 1032 | echo "${p}" 1033 | fi 1034 | return 0 1035 | fi 1036 | } 1037 | 1038 | packages() { 1039 | # detect the packages we need to install on this system 1040 | 1041 | # ------------------------------------------------------------------------- 1042 | # basic build environment 1043 | 1044 | suitable_package distro-sdk 1045 | 1046 | require_cmd git || suitable_package git 1047 | require_cmd find || suitable_package find 1048 | 1049 | require_cmd gcc || \ 1050 | require_cmd gcc-multilib || suitable_package gcc 1051 | 1052 | require_cmd make || suitable_package make 1053 | require_cmd autoconf || suitable_package autoconf 1054 | suitable_package autoconf-archive 1055 | require_cmd autogen || suitable_package autogen 1056 | require_cmd automake || suitable_package automake 1057 | require_cmd pkg-config || suitable_package pkg-config 1058 | 1059 | # ------------------------------------------------------------------------- 1060 | # debugging tools for development 1061 | 1062 | if [ ${PACKAGES_DEBUG} -ne 0 ] 1063 | then 1064 | require_cmd traceroute || suitable_package traceroute 1065 | require_cmd tcpdump || suitable_package tcpdump 1066 | require_cmd screen || suitable_package screen 1067 | 1068 | if [ ${PACKAGES_NETDATA} -ne 0 ] 1069 | then 1070 | require_cmd gdb || suitable_package gdb 1071 | require_cmd valgrind || suitable_package valgrind 1072 | fi 1073 | fi 1074 | 1075 | # ------------------------------------------------------------------------- 1076 | # common command line tools 1077 | 1078 | if [ ${PACKAGES_NETDATA} -ne 0 ] 1079 | then 1080 | require_cmd tar || suitable_package tar 1081 | require_cmd curl || suitable_package curl 1082 | require_cmd gzip || suitable_package gzip 1083 | require_cmd nc || suitable_package netcat 1084 | fi 1085 | 1086 | # 
------------------------------------------------------------------------- 1087 | # firehol/fireqos/update-ipsets command line tools 1088 | 1089 | if [ ${PACKAGES_FIREQOS} -ne 0 ] 1090 | then 1091 | require_cmd ip || suitable_package iproute2 1092 | fi 1093 | 1094 | if [ ${PACKAGES_FIREHOL} -ne 0 ] 1095 | then 1096 | require_cmd iptables || suitable_package iptables 1097 | require_cmd ipset || suitable_package ipset 1098 | require_cmd ulogd ulogd2 || suitable_package ulogd 1099 | require_cmd traceroute || suitable_package traceroute 1100 | require_cmd bridge || suitable_package bridge-utils 1101 | fi 1102 | 1103 | if [ ${PACKAGES_UPDATE_IPSETS} -ne 0 ] 1104 | then 1105 | require_cmd ipset || suitable_package ipset 1106 | require_cmd zip || suitable_package zip 1107 | require_cmd funzip || suitable_package unzip 1108 | fi 1109 | 1110 | # ------------------------------------------------------------------------- 1111 | # netdata libraries 1112 | 1113 | if [ ${PACKAGES_NETDATA} -ne 0 ] 1114 | then 1115 | suitable_package libz-dev 1116 | suitable_package libuuid-dev 1117 | suitable_package libmnl-dev 1118 | fi 1119 | 1120 | # ------------------------------------------------------------------------- 1121 | # sensors 1122 | 1123 | if [ ${PACKAGES_NETDATA_SENSORS} -ne 0 ] 1124 | then 1125 | require_cmd sensors || suitable_package lm_sensors 1126 | fi 1127 | 1128 | # ------------------------------------------------------------------------- 1129 | # netdata database engine libraries (libuv, lz4, openssl, judy) 1130 | if [ ${PACKAGES_NETDATA_DATABASE} -ne 0 ] 1131 | then 1132 | suitable_package libuv 1133 | suitable_package lz4 1134 | suitable_package openssl 1135 | suitable_package judy 1136 | fi 1137 | 1138 | # ------------------------------------------------------------------------- 1139 | # scripting interpreters for netdata plugins 1140 | 1141 | if [ ${PACKAGES_NETDATA_NODEJS} -ne 0 ] 1142 | then 1143 | require_cmd nodejs node js || suitable_package nodejs 1144 | fi 1145 | 1146 | #
------------------------------------------------------------------------- 1147 | # python2 1148 | 1149 | if [ ${PACKAGES_NETDATA_PYTHON} -ne 0 ] 1150 | then 1151 | require_cmd python || suitable_package python 1152 | 1153 | [ ${PACKAGES_NETDATA_PYTHON_MONGO} -ne 0 ] && suitable_package python-pymongo 1154 | # suitable_package python-requests 1155 | # suitable_package python-pip 1156 | 1157 | [ ${PACKAGES_NETDATA_PYTHON_MYSQL} -ne 0 ] && suitable_package python-mysqldb 1158 | [ ${PACKAGES_NETDATA_PYTHON_POSTGRES} -ne 0 ] && suitable_package python-psycopg2 1159 | fi 1160 | 1161 | # ------------------------------------------------------------------------- 1162 | # python3 1163 | 1164 | if [ ${PACKAGES_NETDATA_PYTHON3} -ne 0 ] 1165 | then 1166 | require_cmd python3 || suitable_package python3 1167 | 1168 | suitable_package python3-pymongo 1169 | [ ${PACKAGES_NETDATA_PYTHON_MONGO} -ne 0 ] && suitable_package python3-pymongo 1170 | # suitable_package python3-requests 1171 | # suitable_package python3-pip 1172 | 1173 | [ ${PACKAGES_NETDATA_PYTHON_MYSQL} -ne 0 ] && suitable_package python3-mysqldb 1174 | [ ${PACKAGES_NETDATA_PYTHON_POSTGRES} -ne 0 ] && suitable_package python3-psycopg2 1175 | fi 1176 | 1177 | # ------------------------------------------------------------------------- 1178 | # applications needed for the netdata demo sites 1179 | 1180 | if [ ${PACKAGES_NETDATA_DEMO_SITE} -ne 0 ] 1181 | then 1182 | require_cmd sudo || suitable_package sudo 1183 | require_cmd jq || suitable_package jq 1184 | require_cmd nginx || suitable_package nginx 1185 | require_cmd postconf || suitable_package postfix 1186 | require_cmd lxc-create || suitable_package lxc 1187 | require_cmd logwatch || suitable_package logwatch 1188 | require_cmd mail || suitable_package mailutils 1189 | require_cmd iostat || suitable_package sysstat 1190 | require_cmd iotop || suitable_package iotop 1191 | fi 1192 | } 1193 | 1194 | DRYRUN=0 1195 | run() { 1196 | 1197 | printf >&2 "%q " "${@}" 1198 | 
printf >&2 "\n" 1199 | 1200 | if [ ! "${DRYRUN}" -eq 1 ] 1201 | then 1202 | "${@}" 1203 | return $? 1204 | fi 1205 | return 0 1206 | } 1207 | 1208 | sudo= 1209 | if [ ${UID} -ne 0 ] 1210 | then 1211 | sudo="sudo" 1212 | fi 1213 | 1214 | # ----------------------------------------------------------------------------- 1215 | # debian / ubuntu 1216 | 1217 | validate_install_apt_get() { 1218 | echo >&2 " > Checking if package '${*}' is installed..." 1219 | [ "$(dpkg-query -W --showformat='${Status}\n' "${*}")" = "install ok installed" ] || echo "${*}" 1220 | } 1221 | 1222 | install_apt_get() { 1223 | # download the latest package info 1224 | if [ "${DRYRUN}" -eq 1 ] 1225 | then 1226 | echo >&2 " >> IMPORTANT << " 1227 | echo >&2 " Please make sure your system is up to date" 1228 | echo >&2 " by running: ${sudo} apt-get update " 1229 | echo >&2 1230 | fi 1231 | 1232 | local opts="" 1233 | if [ ${NON_INTERACTIVE} -eq 1 ] 1234 | then 1235 | echo >&2 "Running in non-interactive mode" 1236 | # http://serverfault.com/questions/227190/how-do-i-ask-apt-get-to-skip-any-interactive-post-install-configuration-steps 1237 | export DEBIAN_FRONTEND="noninteractive" 1238 | opts="${opts} -yq" 1239 | fi 1240 | 1241 | # install the required packages 1242 | for pkg in "${@}"; do 1243 | [[ ${DRYRUN} -eq 0 ]] && echo >&2 "Adding package ${pkg}" 1244 | run ${sudo} apt-get ${opts} install "${pkg}" 1245 | done 1246 | } 1247 | 1248 | 1249 | # ----------------------------------------------------------------------------- 1250 | # centos / rhel 1251 | 1252 | validate_install_yum() { 1253 | echo >&2 " > Checking if package '${*}' is installed..." 
1254 | yum list installed "${*}" >/dev/null 2>&1 || echo "${*}" 1255 | } 1256 | 1257 | install_yum() { 1258 | # download the latest package info 1259 | if [ "${DRYRUN}" -eq 1 ] 1260 | then 1261 | echo >&2 " >> IMPORTANT << " 1262 | echo >&2 " Please make sure your system is up to date" 1263 | echo >&2 " by running: ${sudo} yum update " 1264 | echo >&2 1265 | fi 1266 | 1267 | local opts= 1268 | if [ ${NON_INTERACTIVE} -eq 1 ] 1269 | then 1270 | echo >&2 "Running in non-interactive mode" 1271 | # http://unix.stackexchange.com/questions/87822/does-yum-have-an-equivalent-to-apt-aptitudes-debian-frontend-noninteractive 1272 | opts="-y" 1273 | fi 1274 | 1275 | # install the required packages 1276 | run ${sudo} yum ${opts} install "${@}" # --enablerepo=epel-testing 1277 | } 1278 | 1279 | 1280 | # ----------------------------------------------------------------------------- 1281 | # fedora 1282 | 1283 | validate_install_dnf() { 1284 | echo >&2 " > Checking if package '${*}' is installed..." 1285 | dnf list installed "${*}" >/dev/null 2>&1 || echo "${*}" 1286 | } 1287 | 1288 | install_dnf() { 1289 | # download the latest package info 1290 | if [ "${DRYRUN}" -eq 1 ] 1291 | then 1292 | echo >&2 " >> IMPORTANT << " 1293 | echo >&2 " Please make sure your system is up to date" 1294 | echo >&2 " by running: ${sudo} dnf update " 1295 | echo >&2 1296 | fi 1297 | 1298 | local opts= 1299 | if [ ${NON_INTERACTIVE} -eq 1 ] 1300 | then 1301 | echo >&2 "Running in non-interactive mode" 1302 | # man dnf 1303 | opts="-y" 1304 | fi 1305 | 1306 | # install the required packages 1307 | # --setopt=strict=0 allows dnf to proceed 1308 | # installing whatever is available 1309 | # even if a package is not found 1310 | opts="$opts --setopt=strict=0" 1311 | run ${sudo} dnf ${opts} install "${@}" 1312 | } 1313 | 1314 | # ----------------------------------------------------------------------------- 1315 | # gentoo 1316 | 1317 | validate_install_emerge() { 1318 | echo "${*}" 1319 | } 1320 | 1321 | 
install_emerge() { 1322 | # download the latest package info 1323 | # we don't do this for emerge - it is very slow 1324 | # and most users are expected to do this daily 1325 | # emerge --sync 1326 | if [ "${DRYRUN}" -eq 1 ] 1327 | then 1328 | echo >&2 " >> IMPORTANT << " 1329 | echo >&2 " Please make sure your system is up to date" 1330 | echo >&2 " by running: ${sudo} emerge --sync or ${sudo} eix-sync " 1331 | echo >&2 1332 | fi 1333 | 1334 | local opts="--ask" 1335 | if [ ${NON_INTERACTIVE} -eq 1 ] 1336 | then 1337 | echo >&2 "Running in non-interactive mode" 1338 | opts="" 1339 | fi 1340 | 1341 | # install the required packages 1342 | for pkg in "${@}"; do 1343 | [[ ${DRYRUN} -eq 0 ]] && echo >&2 "Adding package ${pkg}" 1344 | run ${sudo} emerge ${opts} -v --noreplace "${pkg}" 1345 | done 1346 | } 1347 | 1348 | 1349 | # ----------------------------------------------------------------------------- 1350 | # alpine 1351 | 1352 | validate_install_apk() { 1353 | echo "${*}" 1354 | } 1355 | 1356 | install_apk() { 1357 | # download the latest package info 1358 | if [ "${DRYRUN}" -eq 1 ] 1359 | then 1360 | echo >&2 " >> IMPORTANT << " 1361 | echo >&2 " Please make sure your system is up to date" 1362 | echo >&2 " by running: ${sudo} apk update " 1363 | echo >&2 1364 | fi 1365 | 1366 | local opts="--force-broken-world" 1367 | if [ ${NON_INTERACTIVE} -eq 1 ] 1368 | then 1369 | echo >&2 "Running in non-interactive mode" 1370 | else 1371 | opts="${opts} -i" 1372 | fi 1373 | 1374 | # install the required packages 1375 | run ${sudo} apk add ${opts} "${@}" 1376 | } 1377 | 1378 | # ----------------------------------------------------------------------------- 1379 | # sabayon 1380 | 1381 | validate_install_equo() { 1382 | echo >&2 " > Checking if package '${*}' is installed..." 
1383 | equo s --installed "${*}" >/dev/null 2>&1 || echo "${*}" 1384 | } 1385 | 1386 | install_equo() { 1387 | # download the latest package info 1388 | if [ "${DRYRUN}" -eq 1 ] 1389 | then 1390 | echo >&2 " >> IMPORTANT << " 1391 | echo >&2 " Please make sure your system is up to date" 1392 | echo >&2 " by running: ${sudo} equo up " 1393 | echo >&2 1394 | fi 1395 | 1396 | local opts="-av" 1397 | if [ ${NON_INTERACTIVE} -eq 1 ] 1398 | then 1399 | echo >&2 "Running in non-interactive mode" 1400 | opts="-v" 1401 | fi 1402 | 1403 | # install the required packages 1404 | for pkg in "${@}"; do 1405 | [[ ${DRYRUN} -eq 0 ]] && echo >&2 "Adding package ${pkg}" 1406 | run ${sudo} equo i ${opts} "${pkg}" 1407 | done 1408 | } 1409 | 1410 | # ----------------------------------------------------------------------------- 1411 | # arch 1412 | 1413 | PACMAN_DB_SYNCED=0 1414 | validate_install_pacman() { 1415 | 1416 | if [ ${PACMAN_DB_SYNCED} -eq 0 ]; then 1417 | echo >&2 " > Running pacman -Sy to sync the database" 1418 | local x=$(pacman -Sy) 1419 | [ ! -n "${x}" ] && echo "${*}" 1420 | PACMAN_DB_SYNCED=1 1421 | fi; 1422 | echo >&2 " > Checking if package '${*}' is installed..." 
1423 | 1424 | # In pacman, you can utilize alternative flags to exactly match package names, 1425 | # but is highly likely we require pattern matching too in this so we keep -s and match 1426 | # the exceptional cases like so 1427 | local x="" 1428 | case "${package}" in 1429 | "gcc") 1430 | # Temporary workaround: In archlinux, default installation includes runtime libs under package "gcc" 1431 | # These are not sufficient for netdata install, so we need to make sure that the appropriate libraries are there 1432 | # by ensuring devel libs are available 1433 | x=$(pacman -Qs "${*}" | grep "base-devel") 1434 | ;; 1435 | "tar") 1436 | x=$(pacman -Qs "${*}" | grep "local/tar") 1437 | ;; 1438 | "make") 1439 | x=$(pacman -Qs "${*}" | grep "local/make ") 1440 | ;; 1441 | *) 1442 | x=$(pacman -Qs "${*}") 1443 | ;; 1444 | esac 1445 | 1446 | [ ! -n "${x}" ] && echo "${*}" 1447 | } 1448 | 1449 | install_pacman() { 1450 | # download the latest package info 1451 | if [ "${DRYRUN}" -eq 1 ] 1452 | then 1453 | echo >&2 " >> IMPORTANT << " 1454 | echo >&2 " Please make sure your system is up to date" 1455 | echo >&2 " by running: ${sudo} pacman -Syu " 1456 | echo >&2 1457 | fi 1458 | 1459 | # install the required packages 1460 | if [ ${NON_INTERACTIVE} -eq 1 ] 1461 | then 1462 | echo >&2 "Running in non-interactive mode" 1463 | # http://unix.stackexchange.com/questions/52277/pacman-option-to-assume-yes-to-every-question/52278 1464 | for pkg in "${@}"; do 1465 | [[ ${DRYRUN} -eq 0 ]] && echo >&2 "Adding package ${pkg}" 1466 | # Try the noconfirm option, if that fails, go with the legacy way for non-interactive 1467 | run ${sudo} pacman --noconfirm --needed -S "${pkg}" || yes | run ${sudo} pacman --needed -S "${pkg}" 1468 | done 1469 | 1470 | else 1471 | for pkg in "${@}"; do 1472 | [[ ${DRYRUN} -eq 0 ]] && echo >&2 "Adding package ${pkg}" 1473 | run ${sudo} pacman --needed -S "${pkg}" 1474 | done 1475 | fi 1476 | } 1477 | 1478 | # 
----------------------------------------------------------------------------- 1479 | # suse / opensuse 1480 | 1481 | validate_install_zypper() { 1482 | rpm -q "${*}" >/dev/null 2>&1 || echo "${*}" 1483 | } 1484 | 1485 | install_zypper() { 1486 | # download the latest package info 1487 | if [ "${DRYRUN}" -eq 1 ] 1488 | then 1489 | echo >&2 " >> IMPORTANT << " 1490 | echo >&2 " Please make sure your system is up to date" 1491 | echo >&2 " by running: ${sudo} zypper update " 1492 | echo >&2 1493 | fi 1494 | 1495 | local opts="--ignore-unknown" 1496 | if [ ${NON_INTERACTIVE} -eq 1 ] 1497 | then 1498 | echo >&2 "Running in non-interactive mode" 1499 | # http://unix.stackexchange.com/questions/82016/how-to-use-zypper-in-bash-scripts-for-someone-coming-from-apt-get 1500 | opts="${opts} --non-interactive" 1501 | fi 1502 | 1503 | # install the required packages 1504 | run ${sudo} zypper ${opts} install "${@}" 1505 | } 1506 | 1507 | install_failed() { 1508 | local ret="${1}" 1509 | cat </dev/null 2>&1 -Ss --max-time 3 "https://registry.my-netdata.io/log/installer?status=${1}&error=${2}&distribution=${distribution}&version=${version}&installer=${package_installer}&tree=${tree}&detection=${detection}&netdata=${PACKAGES_NETDATA}&nodejs=${PACKAGES_NETDATA_NODEJS}&python=${PACKAGES_NETDATA_PYTHON}&python3=${PACKAGES_NETDATA_PYTHON3}&mysql=${PACKAGES_NETDATA_PYTHON_MYSQL}&postgres=${PACKAGES_NETDATA_PYTHON_POSTGRES}&pymongo=${PACKAGES_NETDATA_PYTHON_MONGO}&sensors=${PACKAGES_NETDATA_SENSORS}&database=${PACKAGE_NETDATA_DATABASE}&firehol=${PACKAGES_FIREHOL}&fireqos=${PACKAGES_FIREQOS}&iprange=${PACKAGES_IPRANGE}&update_ipsets=${PACKAGES_UPDATE_IPSETS}&demo=${PACKAGES_NETDATA_DEMO_SITE}" 1541 | } 1542 | 1543 | if [ -z "${1}" ] 1544 | then 1545 | usage 1546 | exit 1 1547 | fi 1548 | 1549 | # parse command line arguments 1550 | DONT_WAIT=0 1551 | NON_INTERACTIVE=0 1552 | IGNORE_INSTALLED=0 1553 | while [ ! 
-z "${1}" ] 1554 | do 1555 | case "${1}" in 1556 | distribution) 1557 | distribution="${2}" 1558 | shift 1559 | ;; 1560 | 1561 | version) 1562 | version="${2}" 1563 | shift 1564 | ;; 1565 | 1566 | codename) 1567 | codename="${2}" 1568 | shift 1569 | ;; 1570 | 1571 | installer) 1572 | check_package_manager "${2}" || exit 1 1573 | shift 1574 | ;; 1575 | 1576 | dont-wait|--dont-wait|-n) 1577 | DONT_WAIT=1 1578 | ;; 1579 | 1580 | non-interactive|--non-interactive|-y) 1581 | NON_INTERACTIVE=1 1582 | ;; 1583 | 1584 | ignore-installed|--ignore-installed|-i) 1585 | IGNORE_INSTALLED=1 1586 | ;; 1587 | 1588 | netdata-all) 1589 | PACKAGES_NETDATA=1 1590 | PACKAGES_NETDATA_NODEJS=1 1591 | PACKAGES_NETDATA_PYTHON=1 1592 | PACKAGES_NETDATA_PYTHON_MYSQL=1 1593 | PACKAGES_NETDATA_PYTHON_POSTGRES=1 1594 | PACKAGES_NETDATA_PYTHON_MONGO=1 1595 | PACKAGES_NETDATA_SENSORS=1 1596 | PACKAGES_NETDATA_DATABASE=1 1597 | ;; 1598 | 1599 | netdata) 1600 | PACKAGES_NETDATA=1 1601 | PACKAGES_NETDATA_PYTHON=1 1602 | PACKAGES_NETDATA_DATABASE=1 1603 | ;; 1604 | 1605 | python|netdata-python) 1606 | PACKAGES_NETDATA_PYTHON=1 1607 | ;; 1608 | 1609 | python3|netdata-python3) 1610 | PACKAGES_NETDATA_PYTHON3=1 1611 | ;; 1612 | 1613 | python-mysql|mysql-python|mysqldb|netdata-mysql) 1614 | PACKAGES_NETDATA_PYTHON=1 1615 | PACKAGES_NETDATA_PYTHON_MYSQL=1 1616 | ;; 1617 | 1618 | python-postgres|postgres-python|psycopg2|netdata-postgres) 1619 | PACKAGES_NETDATA_PYTHON=1 1620 | PACKAGES_NETDATA_PYTHON_POSTGRES=1 1621 | ;; 1622 | 1623 | python-pymongo) 1624 | PACKAGES_NETDATA_PYTHON=1 1625 | PACKAGES_NETDATA_PYTHON_MONGO=1 1626 | ;; 1627 | 1628 | nodejs|netdata-nodejs) 1629 | PACKAGES_NETDATA=1 1630 | PACKAGES_NETDATA_NODEJS=1 1631 | PACKAGES_NETDATA_DATABASE=1 1632 | ;; 1633 | 1634 | sensors|netdata-sensors) 1635 | PACKAGES_NETDATA=1 1636 | PACKAGES_NETDATA_PYTHON=1 1637 | PACKAGES_NETDATA_SENSORS=1 1638 | PACKAGES_NETDATA_DATABASE=1 1639 | ;; 1640 | 1641 | firehol|update-ipsets|firehol-all|fireqos) 1642 | 
PACKAGES_IPRANGE=1 1643 | PACKAGES_FIREHOL=1 1644 | PACKAGES_FIREQOS=1 1645 | PACKAGES_IPRANGE=1 1646 | PACKAGES_UPDATE_IPSETS=1 1647 | ;; 1648 | 1649 | demo|all) 1650 | PACKAGES_NETDATA=1 1651 | PACKAGES_NETDATA_NODEJS=1 1652 | PACKAGES_NETDATA_PYTHON=1 1653 | PACKAGES_NETDATA_PYTHON3=1 1654 | PACKAGES_NETDATA_PYTHON_MYSQL=1 1655 | PACKAGES_NETDATA_PYTHON_POSTGRES=1 1656 | PACKAGES_NETDATA_PYTHON_MONGO=1 1657 | PACKAGES_DEBUG=1 1658 | PACKAGES_IPRANGE=1 1659 | PACKAGES_FIREHOL=1 1660 | PACKAGES_FIREQOS=1 1661 | PACKAGES_UPDATE_IPSETS=1 1662 | PACKAGES_NETDATA_DEMO_SITE=1 1663 | PACKAGES_NETDATA_DATABASE=1 1664 | ;; 1665 | 1666 | help|-h|--help) 1667 | usage 1668 | exit 1 1669 | ;; 1670 | 1671 | *) 1672 | echo >&2 "ERROR: Cannot understand option '${1}'" 1673 | echo >&2 1674 | usage 1675 | exit 1 1676 | ;; 1677 | esac 1678 | shift 1679 | done 1680 | 1681 | # Check for missing core commands like grep, warn the user to install it and bail out cleanly 1682 | if ! command -v grep > /dev/null 2>&1; then 1683 | echo >&2 1684 | echo >&2 "ERROR: 'grep' is required for the install to run correctly and was not found on the system." 1685 | echo >&2 "Please install grep and run the installer again." 
1686 | echo >&2 1687 | exit 1 1688 | fi 1689 | 1690 | if [ -z "${package_installer}" -o -z "${tree}" ] 1691 | then 1692 | if [ -z "${distribution}" ] 1693 | then 1694 | # we dont know the distribution 1695 | autodetect_distribution || user_picks_distribution 1696 | fi 1697 | 1698 | # When no package installer is detected, try again from distro info if any 1699 | if [ -z "${package_installer}" ]; then 1700 | detect_package_manager_from_distribution "${distribution}" 1701 | fi 1702 | 1703 | fi 1704 | 1705 | pv=$(python --version 2>&1) 1706 | if [[ "${pv}" =~ ^Python\ 2.* ]] 1707 | then 1708 | pv=2 1709 | elif [[ "${pv}" =~ ^Python\ 3.* ]] 1710 | then 1711 | pv=3 1712 | PACKAGES_NETDATA_PYTHON3=1 1713 | else 1714 | pv=2 1715 | fi 1716 | 1717 | [ "${detection}" = "/etc/os-release" ] && cat <&2 1745 | echo >&2 "The following command will be run:" 1746 | echo >&2 1747 | DRYRUN=1 1748 | ${package_installer} "${PACKAGES_TO_INSTALL[@]}" 1749 | DRYRUN=0 1750 | echo >&2 1751 | echo >&2 1752 | 1753 | if [ ${DONT_WAIT} -eq 0 -a ${NON_INTERACTIVE} -eq 0 ] 1754 | then 1755 | read -p "Press ENTER to run it > " || exit 1 1756 | fi 1757 | 1758 | ${package_installer} "${PACKAGES_TO_INSTALL[@]}" || install_failed $? 1759 | 1760 | echo >&2 1761 | echo >&2 "All Done! - Now proceed to the next step." 1762 | echo >&2 1763 | 1764 | else 1765 | echo >&2 1766 | echo >&2 "All required packages are already installed. Now proceed to the next step." 
1767 | echo >&2 1768 | fi 1769 | 1770 | remote_log "OK" 1771 | 1772 | exit 0 1773 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | LC_ALL=C 4 | umask 022 5 | 6 | # make sure host is here 7 | which host >/dev/null || { echo >&2 "Install: host" && exit 1; } 8 | which ip >/dev/null || { echo >&2 "Install: ip" && exit 1; } 9 | # which dig >/dev/null || { echo >&2 "Install: dnsutils" && exit 1; } 10 | 11 | # find the device of the default gateway 12 | wan="$(ip -4 route get 8.8.8.8 | grep -oP "dev [^[:space:]]+ " | cut -d ' ' -f 2)" 13 | [ -z "${wan}" ] && wan="eth0" 14 | echo >&2 "Assuming default gateway is via device: ${wan}" 15 | 16 | # find our IP 17 | myip=( $(ip -4 address show ${wan} | grep 'inet' | sed 's/.*inet \([0-9\.]\+\).*/\1/') ) 18 | if [ -z "${myip[*]}" ] 19 | then 20 | echo >&2 "Cannot find my IP !" 21 | exit 1 22 | fi 23 | 24 | hostname_fqdn="$(hostname --fqdn)" 25 | if [ -z "${hostname_fqdn}" ] 26 | then 27 | cat < " 56 | 57 | 58 | # ----------------------------------------------------------------------------- 59 | 60 | ./install-required-packages.sh demo || exit 1 61 | 62 | # ----------------------------------------------------------------------------- 63 | 64 | ./install-all-firehol.sh || exit 1 65 | 66 | # ----------------------------------------------------------------------------- 67 | 68 | tmp=/tmp/installer.$RANDOM.$RANDOM.$$ 69 | 70 | myinstall() { 71 | local file="${1}" owner="${2}" perms="${3}" callback="${4}" 72 | 73 | echo >&2 74 | echo >&2 "Checking: /${file} ..." 75 | 76 | if [ ! 
-f "files/${file}" ] 77 | then 78 | echo >&2 "Cannot find file '${file}'" 79 | return 1 80 | fi 81 | 82 | cat "files/${file}" | sed \ 83 | -e "s|MY_WAN_INTERFACE_TO_BE_REPLACED_HERE|${wan}|g" \ 84 | -e "s|MY_REAL_IP_TO_BE_REPLACED_HERE|${myip[*]}|g" \ 85 | -e "s|MY_HOSTNAME_TO_BE_REPLACED_HERE|$(hostname -s)|g" \ 86 | -e "s|MY_FQDN_HOSTNAME_TO_BE_REPLACED_HERE|${hostname_fqdn}|g" \ 87 | >"${tmp}" 88 | 89 | if [ ! -s "${tmp}" ] 90 | then 91 | echo " >> empty sized converted file: ${tmp} from files/${file}" 92 | return 1 93 | fi 94 | 95 | if [ -f "/${file}" ] 96 | then 97 | diff -q "/${file}" "${tmp}" 98 | if [ $? -eq 0 ] 99 | then 100 | echo >&2 " >> it is the same..." 101 | return 0 102 | else 103 | echo >&2 " >> file /${file} has differences: " 104 | diff "/${file}" "${tmp}" 105 | REPLY= 106 | while [ "${REPLY}" != "y" -a "${REPLY}" != "Y" ] 107 | do 108 | read -p "update /${file} ? [y/n] > " 109 | case "${REPLY}" in 110 | y|Y) break ;; 111 | n|N) return 0;; 112 | esac 113 | done 114 | fi 115 | fi 116 | 117 | echo >&2 " >> installing: /${file} ..." 118 | cp "${tmp}" "/${file}" || return 1 119 | chown "${owner}" "/${file}" || return 1 120 | chmod "${perms}" "/${file}" || return 1 121 | 122 | if [ ! 
-z "${callback}" ] 123 | then 124 | echo >&2 " >> running: ${callback}" 125 | ${callback} || return 1 126 | fi 127 | 128 | return 0 129 | } 130 | 131 | 132 | # ----------------------------------------------------------------------------- 133 | # FireHOL / FireQOS 134 | 135 | myinstall etc/firehol/cloudflare.netset root:root 600 || exit 1 136 | myinstall etc/firehol/firehol.conf root:root 600 || exit 1 137 | myinstall etc/firehol/fireqos.conf root:root 600 || exit 1 138 | myinstall etc/systemd/system/firehol.service root:root 644 || exit 1 139 | myinstall etc/systemd/system/fireqos.service root:root 644 || exit 1 140 | 141 | myinstall etc/sysctl.d/core.conf root:root 644 || exit 1 142 | myinstall etc/sysctl.d/synproxy.conf root:root 644 || exit 1 143 | myinstall etc/sysctl.d/net-buffers.conf root:root 644 || exit 1 144 | myinstall etc/sysctl.d/net-security.conf root:root 644 || exit 1 145 | myinstall etc/sysctl.d/inotify.conf root:root 644 || exit 1 146 | myinstall etc/sysctl.d/entropy.conf root:root 644 || exit 1 147 | sysctl --system 148 | 149 | 150 | 151 | # ----------------------------------------------------------------------------- 152 | # NGINX 153 | 154 | myinstall etc/nginx/cloudflare.conf root:root 644 || exit 1 155 | myinstall etc/nginx/conf.d/status.conf root:root 644 || exit 1 156 | myinstall etc/nginx/conf.d/netdata.conf root:root 644 || exit 1 157 | 158 | cat >files/etc/nginx/snippets/ssl-certs.conf <files/etc/systemd/system.conf || exit 1 201 | myinstall etc/systemd/system.conf root:root 644 || exit 1 202 | systemctl daemon-reexec 203 | 204 | # ----------------------------------------------------------------------------- 205 | # ADD USERS 206 | 207 | myadduser() { 208 | local username="${1}" key="${2}" home= 209 | 210 | getent passwd "${username}" >/dev/null 2>&1 || useradd -m ${username} 211 | 212 | eval "local home=~${username}" 213 | if [ -z "${home}" -o ! 
-d "${home}" ] 214 | then 215 | echo >&2 "Cannot find the home dir of user ${username}" 216 | exit 1 217 | fi 218 | 219 | mkdir -p files/${home}/.ssh || exit 1 220 | mkdir -p ${home}/.ssh || exit 1 221 | if [ -f "${home}/.ssh/authorized_keys" ] 222 | then 223 | ( echo "${key}"; cat ${home}/.ssh/authorized_keys; ) | sort -u >files/${home}/.ssh/authorized_keys 224 | else 225 | echo "${key}" >files/${home}/.ssh/authorized_keys 226 | fi 227 | 228 | myinstall ./${home}/.ssh/authorized_keys ${username} 644 || exit 1 229 | 230 | # add the key to root 231 | mkdir -p files/root/.ssh || exit 1 232 | mkdir -p /root/.ssh || exit 1 233 | if [ -f /root/.ssh/authorized_keys ] 234 | then 235 | ( echo "${key}"; cat /root/.ssh/authorized_keys; ) | sort -u >files/root/.ssh/authorized_keys 236 | else 237 | echo "${key}" >files/root/.ssh/authorized_keys 238 | fi 239 | 240 | myinstall root/.ssh/authorized_keys root:root 644 || exit 1 241 | 242 | if [ -f files/etc/sudoers.d/${username} ] 243 | then 244 | myinstall etc/sudoers.d/${username} root:root 400 || exit 1 245 | fi 246 | } 247 | myadduser costa "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvh2gm+bcosazdtW7kd82in5/8rOB/SmsQnt+vNBpniBwM2TUfpcBpR/ydV3IA0B/tR/vWGm3Ak6pkCrAOm70URKx6aQKeUmqK3TxkXKehZA5eWifcZSyS6StQpPQLWW1PbtviFWwsWiJPA++uWfnMu3B2P2mc3lAUTAPv7Deii1SRTKj9RZW7jZ88mD/5SUSVIudu7f+X1oXycvwen/Zen29ot3E9zzjuqeDD+vGcQp9olfXPSrgR8IGYgdFDHieC9OXPiGS/VgZX+P3YFxR/xpWz1+7hq2TIU+7QFz1kclF+5eWzUiHmdyPj0T97tPHCD5yuQVbTmdHE197YndbB costa@tsaousis.gr" 248 | 249 | # ----------------------------------------------------------------------------- 250 | # CONFIGURE POSTFIX 251 | 252 | cat files/etc/aliases 258 | myinstall etc/aliases root:root 644 259 | newaliases 260 | 261 | myinstall etc/postfix/generic root:root 644 262 | myinstall etc/postfix/recipient_canonical.pcre root:root 644 263 | 264 | postconf -e "myhostname = $(hostname -s).my-netdata.io" 265 | postconf -e "mydomain = my-netdata.io" 266 | postconf -e "myorigin = my-netdata.io" 267 | 
postconf -e "mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 172.16.254.0/24" 268 | postconf -e "relay_domains = my-netdata.io, mynetdata.io, netdata.cloud, netdata.online, netdata.rocks" 269 | postconf -e "mailbox_size_limit = 0" 270 | postconf -e "recipient_delimiter = +" 271 | postconf -e "inet_interfaces = localhost" 272 | postconf -e "smtpd_tls_security_level=may" 273 | postconf -# "smtpd_use_tls" 274 | postconf -# "smtpd_enforce_tls" 275 | postconf -e "alias_maps = hash:/etc/aliases" 276 | postconf -e "smtp_generic_maps = hash:/etc/postfix/generic" 277 | postconf -e "recipient_canonical_maps = regexp:/etc/postfix/recipient_canonical.pcre" 278 | 279 | 280 | # ----------------------------------------------------------------------------- 281 | # ENABLE EVERTYTHING 282 | 283 | echo >&2 "Reloading systemd" 284 | systemctl daemon-reload || exit 1 285 | 286 | echo >&2 "Enabling ulogd2" 287 | systemctl enable ulogd2 || exit 1 288 | 289 | echo >&2 "Enabling firehol" 290 | systemctl enable firehol || exit 1 291 | 292 | echo >&2 "Enabling fireqos" 293 | systemctl enable fireqos || exit 1 294 | 295 | echo >&2 "Enabling postfix" 296 | systemctl enable postfix || exit 1 297 | 298 | echo >&2 "Enabling nginx" 299 | systemctl enable nginx || exit 1 300 | 301 | echo >&2 "Enabling netdata" 302 | systemctl enable netdata || exit 1 303 | 304 | echo >&2 "Enabling LXC" 305 | systemctl enable lxc || exit 1 306 | 307 | 308 | # ----------------------------------------------------------------------------- 309 | # START EVERYTHING 310 | 311 | echo >&2 312 | 313 | echo >&2 "Starting ulogd2" 314 | systemctl restart ulogd2 || exit 1 315 | 316 | echo >&2 "Starting firehol" 317 | systemctl restart firehol || exit 1 318 | 319 | echo >&2 "Starting fireqos" 320 | systemctl restart fireqos || exit 1 321 | 322 | echo >&2 "Restarting postfix" 323 | systemctl restart postfix || exit 1 324 | 325 | echo >&2 "Starting nginx" 326 | systemctl restart nginx || exit 1 327 | 328 | echo >&2 
"Restarting netdata" 329 | systemctl restart netdata || exit 1 330 | 331 | echo >&2 "Starting LXC" 332 | systemctl start lxc || exit 1 333 | 334 | 335 | # ----------------------------------------------------------------------------- 336 | # SSL (nginx has to be running) 337 | 338 | if [ ! -d /etc/letsencrypt/live/${hostname_fqdn}/ ] 339 | then 340 | 341 | do_ssl=1 342 | if [ "$(hostname -s).my-netdata.io" != "${hostname_fqdn}" ] 343 | then 344 | echo >&2 "CANNOT INSTALL LETSENCRYPT - WRONG HOSTNAME: $(hostname -s).my-netdata.io is not ${hostname_fqdn}" 345 | do_ssl=0 346 | fi 347 | 348 | if [ "${myip}" != "${hostname_resolved}" ] 349 | then 350 | echo >&2 "CANNOT INSTALL LETSENCRYPT - ${hostname_fqdn} is resolved to ${hostname_resolved}, instead of ${myip}" 351 | do_ssl=0 352 | fi 353 | 354 | if [ ${do_ssl} -eq 1 ] 355 | then 356 | 357 | echo >&2 "Reloading nginx to let it serve letsencrypt ACMA..." 358 | nginx -t || { echo >&2 "nginx config is not consistent."; exit 1; } 359 | nginx -s reload || { echo >&2 "nginx failed to be reloaded."; exit 1; } 360 | 361 | if [ -d /usr/src/letsencrypt.git ] 362 | then 363 | cd /usr/src/letsencrypt.git || exit 1 364 | git fetch --all || exit 1 365 | git reset --hard origin/master || exit 1 366 | else 367 | cd /usr/src 368 | git clone https://github.com/letsencrypt/letsencrypt.git letsencrypt.git || exit 1 369 | cd letsencrypt.git || exit 1 370 | fi 371 | 372 | ./letsencrypt-auto certonly --renew-by-default --text --agree-tos \ 373 | --webroot --webroot-path=/var/www/html \ 374 | --email costa@tsaousis.gr \ 375 | -d ${hostname_fqdn} || exit 1 376 | 377 | fi 378 | fi 379 | 380 | 381 | # ----------------------------------------------------------------------------- 382 | 383 | cat <&2 401 | echo >&2 "All done!" 
402 | exit 0 403 | -------------------------------------------------------------------------------- /lxc/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # for installing older ubuntu releases: 4 | # apt-get install ubuntu-archive-keyring 5 | 6 | base="/var/lib/lxc" 7 | cd "${base}" || exit 1 8 | 9 | NULL= 10 | 11 | was_ok=() 12 | ok=() 13 | failed=() 14 | started=() 15 | nostart=() 16 | 17 | # NAME TEMPLATE DISTRO RELEASE ARCH 18 | for x in \ 19 | "alpine31 alpine - v3.1 amd64" \ 20 | "alpine32 alpine - v3.2 amd64" \ 21 | "alpine33 download alpine 3.3 amd64" \ 22 | "alpine34 download alpine 3.4 amd64" \ 23 | "alpine35 download alpine 3.5 amd64" \ 24 | "alpine36 download alpine 3.6 amd64" \ 25 | "alpineedge download alpine edge amd64" \ 26 | "arch download archlinux current amd64" \ 27 | "centos6 download centos 6 amd64" \ 28 | "centos7 download centos 7 amd64" \ 29 | "cirros cirros - - amd64" \ 30 | "debian10 download debian buster amd64" \ 31 | "debian8 download debian jessie amd64" \ 32 | "debiansid download debian sid amd64" \ 33 | "debian9 download debian stretch amd64" \ 34 | "debian7 download debian wheezy amd64" \ 35 | "fedora24 download fedora 24 amd64" \ 36 | "fedora25 download fedora 25 amd64" \ 37 | "fedora26 download fedora 26 amd64" \ 38 | "gentoo download gentoo current amd64" \ 39 | "opensuse422 download opensuse 42.2 amd64" \ 40 | "opensuse423 download opensuse 42.3 amd64" \ 41 | "oracle6 download oracle 6 amd64" \ 42 | "oracle7 download oracle 7 amd64" \ 43 | "plamo5 download plamo 5.x amd64" \ 44 | "plamo6 download plamo 6.x amd64" \ 45 | "ubuntu1204 download ubuntu precise amd64" \ 46 | "ubuntu1404 download ubuntu trusty amd64" \ 47 | "ubuntu1604 download ubuntu xenial amd64" \ 48 | "ubuntu1610 ubuntu - yakkety amd64" \ 49 | "ubuntu1704 download ubuntu zesty amd64" \ 50 | "ubuntu1710 download ubuntu artful amd64" \ 51 | ; 52 | do 53 | # "ubuntu1510 ubuntu-old - wily 
amd64" \ 54 | # "ubuntu1504 ubuntu-old - vivid amd64" \ 55 | # "ubuntu1410 ubuntu-old - utopic amd64" \ 56 | # "ubuntu1310 ubuntu-old - saucy amd64" \ 57 | # "ubuntu1304 ubuntu-old - raring amd64" \ 58 | # "ubuntu1210 ubuntu-old - quantal amd64" \ 59 | # "ubuntu1110 ubuntu-old - oneiric amd64" \ 60 | # "ubuntu1104 ubuntu-old - natty amd64" \ 61 | # "ubuntu1010 ubuntu-old - maverick amd64" \ 62 | # "ubuntu1004 ubuntu-old - lucid amd64" \ 63 | 64 | a=(${x}) 65 | name="${a[0]}" 66 | template="${a[1]}" 67 | distro="${a[2]}" 68 | release="${a[3]}" 69 | arch="${a[4]}" 70 | 71 | opts=() 72 | case "${template}" in 73 | download) 74 | opts+=("-d" "${distro}" "-r" "${release}" "--no-validate") 75 | ;; 76 | 77 | ubuntu-old) 78 | template="ubuntu" 79 | opts+=("-r" "${release}" "--mirror" "http:/old-releases.ubuntu.com/ubuntu" "--security-mirror" "http:/old-releases.ubuntu.com/ubuntu") 80 | ;; 81 | 82 | *) 83 | [ "${release}" != "-" ] && opts+=("-r" "${release}") 84 | ;; 85 | esac 86 | opts+=("-a" "${arch}") 87 | 88 | if [ -d "${name}" -a ! -d "${name}/rootfs" ] 89 | then 90 | echo >&2 "Removing incomplete container: ${name}" 91 | rm -rf "${name}" 92 | fi 93 | 94 | inst=0 95 | if [ ! -d "${name}" ] 96 | then 97 | echo 98 | echo "lxc-create -n ${name} -t ${template} -- ${opts[*]}" 99 | lxc-create -n "${name}" -t "${template}" -- "${opts[@]}" 100 | ret=$? 101 | if [ $ret -eq 0 ] 102 | then 103 | ok+=("${name}") 104 | inst=1 105 | else 106 | echo >&2 "FAILED with code $ret" 107 | failed+=("${name}") 108 | fi 109 | else 110 | echo >&2 "Found installed container: ${name}" 111 | was_ok+=("${name}") 112 | inst=1 113 | fi 114 | 115 | if [ $inst -eq 1 ] 116 | then 117 | systemctl enable lxc@${name}.service && \ 118 | systemctl start lxc@${name}.service 119 | 120 | if [ $? -eq 0 ] 121 | then 122 | started+=("${name}") 123 | else 124 | nostart+=("${name}") 125 | fi 126 | fi 127 | done 128 | 129 | cat <&2 "Cannot find the IP address of interface '${lxcbr}'." 
15 | exit 1 16 | fi 17 | 18 | # ----------------------------------------------------------------------------- 19 | # api keys 20 | 21 | lxcapikey=$(&2 "Cannot create a UUID" && exit 1 26 | echo "${lxcapikey}" >/etc/netdata/apikey.lxc 27 | fi 28 | 29 | demoapikey=$(&2 "Cannot find the latest binary netdata." && exit 1 37 | curl "${STATICBASE}/${LATEST}" >"/tmp/${LATEST}" 38 | 39 | [ ! -s "/tmp/${LATEST}" ] && echo >&2 "Cannot download latest binary netdata." && exit 1 40 | 41 | 42 | # ----------------------------------------------------------------------------- 43 | # prepare the master 44 | 45 | cat >/etc/netdata/netdata.conf </etc/netdata/stream.conf <>/etc/netdata/stream.conf <&2 120 | echo >&2 "working on lxc container: ${x}" 121 | 122 | mkdir -p "${lxcbase}/${x}/rootfs/opt/netdata/etc/netdata" || continue 123 | 124 | cp "/tmp/${LATEST}" "${lxcbase}/${x}/rootfs/opt/netdata-latest.run" 125 | chmod 755 "${lxcbase}/${x}/rootfs/opt/netdata-latest.run" 126 | 127 | cat >"${lxcbase}/${x}/rootfs/opt/netdata/etc/netdata/netdata.conf" <"${lxcbase}/${x}/rootfs/opt/netdata/etc/netdata/stream.conf" <&2 4 | 5 | x=$(grep newrelic /etc/apt/sources.list.d/newrelic.list) 6 | if [ -z "${x}" ] 7 | then 8 | echo >&2 "Adding NewRelic to apt..." 9 | echo deb http://apt.newrelic.com/debian/ newrelic non-free >>/etc/apt/sources.list.d/newrelic.list 10 | wget -O- https://download.newrelic.com/548C16BF.gpg | apt-key add - 11 | apt-get update 12 | fi 13 | 14 | echo >&2 "Installing newrelic agent..." 15 | apt-get install newrelic-sysmond 16 | 17 | echo >&2 "Setting newrelic license key..." 18 | nrsysmond-config --set license_key=4048c8a4e7e604a87074eb565aa10acdd6a94adb 19 | 20 | echo >&2 "Restarting newrelic agent..." 21 | /etc/init.d/newrelic-sysmond restart 22 | --------------------------------------------------------------------------------