├── vpn ├── config │ ├── pap-secrets.in │ ├── etc-ipsec.secrets.conf.in │ ├── pam-ppp.in │ ├── vpn.sh.in │ ├── options.xl2tpd.in │ ├── xl2tpd.conf.in │ ├── etc-rc.local.in │ └── openswan.conf.in └── vpn.yml ├── openstack-example ├── files │ └── index.php ├── templates │ ├── keepalived.lbs.conf │ ├── gluster-init.j2 │ ├── keepalived.lbs.conf.j2 │ ├── keepalived.sql.conf.j2 │ ├── nginx-lb.conf.j2 │ ├── my.cnf.j2 │ └── wp-config.php.j2 ├── handlers │ └── main.yml ├── vars │ └── main.yml ├── tasks │ ├── keepalived.yml │ ├── configure-lbs.yml │ ├── configure-app.yml │ ├── configure-gluster-app.yml │ ├── configure-dbs.yml │ └── create-instances.yml └── main.yml ├── tor ├── handlers │ └── main.yml ├── example-playbook.yml ├── ansible_hosts.example ├── templates │ ├── tor.list.j2 │ ├── rc.local.j2 │ ├── iptables-save.j2 │ ├── torrc.j2 │ ├── tor.gpg.key.j2 │ └── notice.html.j2 ├── README.md ├── defaults │ └── main.yml └── tasks │ └── main.yml ├── collectd ├── config │ ├── thresholds.conf │ ├── filters.conf │ ├── collection.conf │ ├── restartd.conf │ ├── collectd-client.conf │ ├── collectd-server.conf │ ├── iptables.sh │ └── plugins.conf └── collectd.yml ├── raymon ├── config │ ├── raymonserver.in │ └── stat.php └── raymon.yml ├── .gitignore ├── munin-client ├── config │ ├── munin-node.conf.in │ └── plugin-conf.in └── munin-client.yml ├── vnstat ├── vnstat.yml └── config │ └── vnstat.conf.in ├── start └── start.yml ├── goaccess ├── tasks │ └── main.yml └── templates │ └── goaccess.conf.j2 ├── oh-my-zsh ├── ohmyzsh.yml └── files │ └── zshrc.in ├── sudo └── sudo.yml ├── lighttpd-nodes ├── config │ └── lighttpd.conf.in └── lighttpd-nodes.yml └── README.md /vpn/config/pap-secrets.in: -------------------------------------------------------------------------------- 1 | * l2tpd "" * -------------------------------------------------------------------------------- /openstack-example/files/index.php: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /tor/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart tor 3 | service: name=tor state=restarted 4 | 5 | -------------------------------------------------------------------------------- /vpn/config/etc-ipsec.secrets.conf.in: -------------------------------------------------------------------------------- 1 | {{ ipv4_address }} %any: PSK "{{ shared_secret }}" 2 | -------------------------------------------------------------------------------- /tor/example-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tor 3 | 4 | roles: 5 | - { role: tor, sudo: yes } 6 | -------------------------------------------------------------------------------- /collectd/config/thresholds.conf: -------------------------------------------------------------------------------- 1 | ## /etc/collectd/collectd.conf generated for {{ pbname }} by Ansible 2 | 3 | -------------------------------------------------------------------------------- /collectd/config/filters.conf: -------------------------------------------------------------------------------- 1 | ## /etc/collectd/filters.conf generated for {{ pbname }} by Ansible 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /tor/ansible_hosts.example: -------------------------------------------------------------------------------- 1 | [tor] 2 | vps.example.org tor_node_name=example_vps accounting_max="100GB" accounting_start="day 10:00" 3 | -------------------------------------------------------------------------------- /tor/templates/tor.list.j2: -------------------------------------------------------------------------------- 1 | ## {{ ansible_managed }} 2 | deb http://deb.torproject.org/torproject.org {{ ansible_lsb.codename }} main 3 | 
-------------------------------------------------------------------------------- /collectd/config/collection.conf: -------------------------------------------------------------------------------- 1 | ## /etc/collectd/collectd.conf generated for {{ pbname }} by Ansible 2 | datadir: "/var/lib/collectd/rrd/" 3 | libdir: "/usr/lib/collectd/" 4 | 5 | -------------------------------------------------------------------------------- /vpn/config/pam-ppp.in: -------------------------------------------------------------------------------- 1 | auth required pam_nologin.so 2 | auth required pam_unix.so 3 | account required pam_unix.so 4 | session required pam_unix.so -------------------------------------------------------------------------------- /raymon/config/raymonserver.in: -------------------------------------------------------------------------------- 1 | ## Cronjob for host {{ pbname }} for ray-mon server script 2 | ## Generated by Ansible 3 | 4 | */6 * * * * {{ scriptdir }}/client.sh > /var/www/stat.json 5 | -------------------------------------------------------------------------------- /vpn/config/vpn.sh.in: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## generated for {{ pbname }} by Ansible 3 | for vpn in /proc/sys/net/ipv4/conf/*; do echo 0 > $vpn/accept_redirects; echo 0 > $vpn/send_redirects; done -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ###################### 2 | # OS generated files # 3 | ###################### 4 | .DS_Store 5 | .DS_Store? 6 | ._* 7 | .Spotlight-V100 8 | .Trashes 9 | Icon? 
10 | ehthumbs.db 11 | Thumbs.db 12 | -------------------------------------------------------------------------------- /vpn/config/options.xl2tpd.in: -------------------------------------------------------------------------------- 1 | ms-dns 8.8.8.8 2 | ms-dns 8.8.4.4 3 | auth 4 | mtu 1200 5 | mru 1000 6 | crtscts 7 | hide-password 8 | modem 9 | name l2tpd 10 | proxyarp 11 | lcp-echo-interval 30 12 | lcp-echo-failure 4 13 | login -------------------------------------------------------------------------------- /vpn/config/xl2tpd.conf.in: -------------------------------------------------------------------------------- 1 | [global] 2 | ipsec saref = yes 3 | 4 | [lns default] 5 | ip range = 172.16.1.30-172.16.1.100 6 | local ip = 172.16.1.1 7 | unix authentication = yes 8 | require authentication = yes 9 | ppp debug = yes 10 | pppoptfile = /etc/ppp/options.xl2tpd 11 | length bit = yes -------------------------------------------------------------------------------- /tor/README.md: -------------------------------------------------------------------------------- 1 | # Tor playbook for Ansible 2 | 3 | A simple playbook to set up a tor relay for Ansible. Supports Debian, Ubuntu and Arch Linux. 4 | 5 | Allows you to set accounting data, bandwidth data and family date. 6 | 7 | See example playbook and example hosts, plus defaults/main.yml for defaults variable. 
8 | 9 | More info: [https://raymii.org](https://raymii.org) 10 | -------------------------------------------------------------------------------- /tor/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | my_family: "" 3 | 4 | run_as_daemon: 1 5 | 6 | tor_node_name: "ididnteditheconfig" 7 | 8 | relay_bandwidth_rate: "5 MB" 9 | relay_bandwidth_burst: "10 MB" 10 | 11 | accounting_max: "5 GB" 12 | accounting_start: "day 10:00" 13 | 14 | contact_info: "0xFFFFFFFF Random Person " 15 | 16 | exit_policy: "reject *:*" 17 | 18 | mirror_dirport_to_80: "false" 19 | -------------------------------------------------------------------------------- /openstack-example/templates/keepalived.lbs.conf: -------------------------------------------------------------------------------- 1 | ! Configuration File for keepalived 2 | 3 | vrrp_instance VI_1 { 4 | state MASTER 5 | interface eth0 6 | virtual_router_id 51 7 | priority 100 8 | advert_int 1 9 | authentication { 10 | auth_type PASS 11 | auth_pass $ place secure password here. 12 | } 13 | virtual_ipaddress { 14 | 1{{ keepalived-lbs-vip }} 15 | } 16 | } -------------------------------------------------------------------------------- /tor/templates/rc.local.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # rc.local 4 | # 5 | # This script is executed at the end of each multiuser runlevel. 6 | # Make sure that the script will "exit 0" on success or any other 7 | # value on error. 8 | # 9 | # In order to enable or disable this script just change the execution 10 | # bits. 11 | # 12 | # By default this script does nothing. 
13 | 14 | /sbin/iptables-restore < /etc/iptables.save 15 | 16 | exit 0 17 | 18 | -------------------------------------------------------------------------------- /openstack-example/templates/gluster-init.j2: -------------------------------------------------------------------------------- 1 | # http://serverfault.com/questions/611462/glusterfs-failing-to-mount-at-boot-with-ubuntu-14-04 2 | author "Louis Zuckerman " 3 | description "Block the mounting event for glusterfs filesystems until the network interfaces are running" 4 | 5 | instance $MOUNTPOINT 6 | 7 | start on mounting TYPE=glusterfs 8 | task 9 | exec start wait-for-state WAIT_FOR=networking WAITER=mounting-glusterfs-$MOUNTPOINT 10 | -------------------------------------------------------------------------------- /openstack-example/templates/keepalived.lbs.conf.j2: -------------------------------------------------------------------------------- 1 | ! Configuration File for keepalived 2 | 3 | vrrp_instance VI_1 { 4 | state MASTER 5 | interface eth0 6 | virtual_router_id {{ keepalived_lbs_router_id }} 7 | priority {{ keepalived_lbs_prio }} 8 | 9 | advert_int 1 10 | authentication { 11 | auth_type PASS 12 | auth_pass {{ keepalived_lbs_passwd }} 13 | } 14 | virtual_ipaddress { 15 | {{ keepalived_lbs_vip }} 16 | } 17 | } -------------------------------------------------------------------------------- /openstack-example/templates/keepalived.sql.conf.j2: -------------------------------------------------------------------------------- 1 | ! 
Configuration File for keepalived 2 | 3 | vrrp_instance VI_1 { 4 | state MASTER 5 | interface eth0 6 | virtual_router_id {{ keepalived_sql_router_id }} 7 | priority {{ keepalived_sql_prio }} 8 | 9 | advert_int 1 10 | authentication { 11 | auth_type PASS 12 | auth_pass {{ keepalived_sql_passwd }} 13 | } 14 | virtual_ipaddress { 15 | {{ keepalived_sql_vip }} 16 | } 17 | } -------------------------------------------------------------------------------- /openstack-example/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart nginx 4 | service: 5 | name: nginx 6 | state: restarted 7 | enabled: yes 8 | 9 | - name: restart keepalived 10 | service: 11 | name: keepalived 12 | state: restarted 13 | enabled: yes 14 | 15 | - name: restart mysql 16 | service: 17 | name: mysql 18 | state: restarted 19 | enabled: yes 20 | 21 | - name: restart apache2 22 | service: 23 | name: apache2 24 | state: restarted 25 | enabled: yes 26 | -------------------------------------------------------------------------------- /tor/templates/iptables-save.j2: -------------------------------------------------------------------------------- 1 | # Generated by iptables-save v1.4.14 on Sun Jun 8 21:06:52 2013 2 | *nat 3 | :PREROUTING ACCEPT [160:38439] 4 | :INPUT ACCEPT [5:260] 5 | :OUTPUT ACCEPT [5:289] 6 | :POSTROUTING ACCEPT [5:289] 7 | -A PREROUTING -i eth0 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9030 8 | COMMIT 9 | # Completed on Sun Jun 8 21:06:52 2014 10 | # Generated by iptables-save v1.4.14 on Sun Jun 8 21:06:52 2014 11 | *filter 12 | :INPUT ACCEPT [1319:229693] 13 | :FORWARD ACCEPT [0:0] 14 | :OUTPUT ACCEPT [886:137143] 15 | COMMIT 16 | 17 | -------------------------------------------------------------------------------- /vpn/config/etc-rc.local.in: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | # 3 | # rc.local 4 | # 5 | # This script is executed at the 
end of each multiuser runlevel. 6 | # Make sure that the script will "exit 0" on success or any other 7 | # value on error. 8 | # 9 | # In order to enable or disable this script just change the execution 10 | # bits. 11 | # 12 | # By default this script does nothing. 13 | # /etc/rc.local for host {{ pbname }} generated by Ansible 14 | 15 | for vpn in /proc/sys/net/ipv4/conf/*; do echo 0 > $vpn/accept_redirects; echo 0 > $vpn/send_redirects; done 16 | iptables --table nat --append POSTROUTING --jump MASQUERADE -------------------------------------------------------------------------------- /vpn/config/openswan.conf.in: -------------------------------------------------------------------------------- 1 | version 2.0 2 | config setup 3 | dumpdir=/var/run/pluto/ 4 | nat_traversal=yes 5 | virtual_private=%v4:10.0.0.0/8,%v4:192.168.0.0/16,%v4:172.16.0.0/12,%v6:fd00::/8,%v6:fe80::/10 6 | oe=off 7 | protostack=netkey 8 | 9 | conn L2TP-PSK-NAT 10 | rightsubnet=vhost:%priv 11 | also=L2TP-PSK-noNAT 12 | 13 | conn L2TP-PSK-noNAT 14 | authby=secret 15 | pfs=no 16 | auto=add 17 | keyingtries=3 18 | ikelifetime=8h 19 | keylife=1h 20 | type=transport 21 | left={{ ipv4_address }} 22 | leftprotoport=17/1701 23 | right=%any 24 | rightprotoport=17/%any 25 | -------------------------------------------------------------------------------- /tor/templates/torrc.j2: -------------------------------------------------------------------------------- 1 | ## {{ ansible_managed }} 2 | SocksPort 0 3 | SocksPolicy reject * 4 | Log notice syslog 5 | RunAsDaemon {{ run_as_daemon }} 6 | ORPort 9001 7 | Nickname {{ tor_node_name }} 8 | RelayBandwidthRate {{ relay_bandwidth_rate }} 9 | RelayBandwidthBurst {{ relay_bandwidth_burst }} 10 | {% if accounting_max is defined %} 11 | AccountingMax {{ accounting_max }} 12 | AccountingStart {{ accounting_start }} 13 | {% endif %} 14 | ContactInfo {{ contact_info }} 15 | DirPort 9030 16 | DirPortFrontPage /etc/tor/notice.html 17 | {% if my_family is defined %} 18 
| MyFamily {{ my_family }} 19 | {% endif %} 20 | ExitPolicy {{ exit_policy }} 21 | 22 | -------------------------------------------------------------------------------- /munin-client/config/munin-node.conf.in: -------------------------------------------------------------------------------- 1 | ## munin-node config file for host {{ pbname }} 2 | ## Autogenerated by Ansible by Raymii.org 3 | 4 | log_level 4 5 | log_file /var/log/munin/munin-node.log 6 | pid_file /var/run/munin/munin-node.pid 7 | background 1 8 | timeout 120 9 | setsid 1 10 | user root 11 | group root 12 | ignore_file ~$ 13 | ignore_file DEADJOE$ 14 | ignore_file \.bak$ 15 | ignore_file %$ 16 | ignore_file \.dpkg-(tmp|new|old|dist)$ 17 | ignore_file \.rpm(save|new)$ 18 | ignore_file \.pod$ 19 | 20 | host_name {{ pbname }} 21 | host {{ listenaddr }} 22 | port {{ listenport }} 23 | 24 | {% for address in allowed_addresses %} 25 | allow {{ address }} 26 | {% endfor %} 27 | -------------------------------------------------------------------------------- /raymon/raymon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: status-servers 3 | sudo: True 4 | user: remy 5 | 6 | vars: 7 | scriptdir: "/srv/http/status" 8 | wwwuser: "www-data" 9 | wwwgroup: "www-data" 10 | pbname: $inventory_hostname 11 | 12 | 13 | tasks: 14 | 15 | - name: Create ray-mon server script directory 16 | file: path=$scriptdir/ state=directory owner=$wwwuser group=$wwwuser mode=0755 17 | 18 | - name: Get static files from github (for the html kickstart framework) 19 | git: repo=git://github.com/RaymiiOrg/HTML-KickStart.git dest=$scriptdir/git 20 | 21 | - name: Copy stat.php 22 | copy: src=config/stat.php dest=$scriptdir/index.php owner=$wwwuser group=$wwwgroup mode=0755 23 | -------------------------------------------------------------------------------- /vnstat/vnstat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: spcs 3 | 
sudo: True 4 | user: remy 5 | 6 | vars: 7 | interface: ${ansible_default_ipv4.interface} 8 | interface2: ${ansible_default_ipv4.alias} 9 | ipaddr: ${ansible_default_ipv4.address} 10 | 11 | tasks: 12 | - name: Install packages 13 | apt: name=$item state=latest 14 | with_items: 15 | - vnstat 16 | 17 | - name: Set up vnstat config file 18 | template: src=config/vnstat.conf.in dest=/etc/vnstat.conf mode=0755 owner=root group=root 19 | notify: restart vnstat 20 | 21 | - name: Ininitalize vnstat 22 | command: sudo vnstat -u -i $interface 23 | 24 | handlers: 25 | - name: restart vnstat 26 | action: service name=vnstat state=restarted -------------------------------------------------------------------------------- /start/start.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Deploy startpage app from Bas 3 | ## I need a user account which is able to sudo without password 4 | ## I also need a working webserver. 5 | 6 | - hosts: vps1.sparklingclouds.nl 7 | sudo: True 8 | user: remy 9 | 10 | vars: 11 | scriptdir: "/var/www/s" 12 | wwwuser: "www-data" 13 | wwwgroup: "www-data" 14 | pbname: $inventory_hostname 15 | 16 | 17 | tasks: 18 | 19 | - name: Remove script directory 20 | action: file path=$scriptdir/ state=absent 21 | 22 | - name: Create script directory 23 | action: file path=$scriptdir/ state=directory owner=$wwwuser group=$wwwuser mode=0755 24 | 25 | - name: Checkout app on Github 26 | action: git repo=git://github.com/RaymiiOrg/start.git dest=$scriptdir/ branch=master 27 | -------------------------------------------------------------------------------- /openstack-example/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | auth_url: https://identity.stack.cloudvps.com/v2.0 3 | login_username: 4 | login_password: 5 | login_tenant_name: 6 | image_name: 'CloudVPS Ubuntu 14.04' 7 | image_id: 8 | private_net: 9 | public_net: 00000000-0000-0000-0000-000000000000 10 | 
public_net_name: "net-public" 11 | keypair_name: "" 12 | flavor_id: 101 13 | website_name: "example.org" 14 | mysql_user: "app" 15 | mysql_password: "FPyfUItINE2gMsMM" 16 | 17 | keepalived_sql_vip: "10.107.244.210" 18 | keepalived_sql_router_id: "60" 19 | keepalived_sql_passwd: "AbAoSlcc2BgJiGks" 20 | 21 | keepalived_lbs_vip: "10.107.244.200" 22 | keepalived_lbs_router_id: "50" 23 | keepalived_lbs_passwd: "m9RwTzFk3MxtyXp0" 24 | 25 | gluster_brick_dir: "/srv/gluster/brick" 26 | gluster_volume: default 27 | -------------------------------------------------------------------------------- /openstack-example/tasks/keepalived.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - apt: 3 | name: "{{ item }}" 4 | state: latest 5 | update_cache: yes 6 | with_items: 7 | - keepalived 8 | 9 | - sysctl: 10 | name: net.ipv4.ip_nonlocal_bind 11 | value: "1" 12 | state: present 13 | reload: yes 14 | 15 | - template: 16 | src: keepalived.lbs.conf.j2 17 | dest: /etc/keepalived/keepalived.conf 18 | when: inventory_hostname in groups['lbs'] 19 | notify: 20 | - restart keepalived 21 | 22 | - template: 23 | src: keepalived.sql.conf.j2 24 | dest: /etc/keepalived/keepalived.conf 25 | when: inventory_hostname in groups['dbs'] 26 | notify: 27 | - restart keepalived 28 | 29 | - service: 30 | name: "{{ item }}" 31 | state: restarted 32 | enabled: yes 33 | with_items: 34 | - keepalived -------------------------------------------------------------------------------- /openstack-example/tasks/configure-lbs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - apt_repository: 3 | repo: 'ppa:nginx/stable' 4 | state: present 5 | update_cache: yes 6 | 7 | - apt: 8 | name: "{{ item }}" 9 | state: latest 10 | update_cache: yes 11 | with_items: 12 | - nginx 13 | - vim 14 | - git 15 | - ntp 16 | 17 | - file: 18 | dest: /var/cache/nginx 19 | state: directory 20 | owner: www-data 21 | 22 | - template: 23 | src: 
nginx-lb.conf.j2 24 | dest: /etc/nginx/sites-available/lbs.conf 25 | notify: 26 | - restart nginx 27 | 28 | - file: 29 | src: /etc/nginx/sites-available/lbs.conf 30 | dest: /etc/nginx/sites-enabled/lbs.conf 31 | state: link 32 | notify: 33 | - restart nginx 34 | 35 | - file: 36 | dest: /etc/nginx/sites-enabled/default 37 | state: absent 38 | notify: 39 | - restart nginx 40 | 41 | 42 | -------------------------------------------------------------------------------- /openstack-example/tasks/configure-app.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - apt: 3 | name="{{ item }}" 4 | state=latest 5 | update_cache=yes 6 | with_items: 7 | - php5-mysql 8 | - python-pip 9 | - php5 10 | - libapache2-mod-php5 11 | - php5-mcrypt 12 | - vim 13 | - git 14 | - ntp 15 | 16 | - git: 17 | repo: https://github.com/WordPress/WordPress.git 18 | dest: /var/www/html/site 19 | force: yes 20 | update: no 21 | when: '"{{ inventory_hostname }}" == "{{ groups.app[0] }}"' 22 | 23 | 24 | - file: 25 | dest: /var/www/html/index.html 26 | state: absent 27 | 28 | - template: 29 | src: wp-config.php.j2 30 | dest: /var/www/html/site/wp-config.php 31 | when: '"{{ inventory_hostname }}" == "{{ groups.app[0] }}"' 32 | 33 | 34 | - copy: 35 | src: index.php 36 | dest: /var/www/html/index.php 37 | when: '"{{ inventory_hostname }}" == "{{ groups.app[0] }}"' -------------------------------------------------------------------------------- /goaccess/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - file: path=/var/keys state=directory owner=root 4 | 5 | - get_url: url=http://deb.goaccess.io/gnugpg.key dest=/var/keys/goaccess_signing.key 6 | register: result 7 | 8 | - command: "apt-key add /var/keys/goaccess_signing.key" 9 | when: result.changed 10 | 11 | - apt_repository: repo='deb http://deb.goaccess.io jessie main' state=present 12 | 13 | - apt: update_cache=yes 14 | 15 | - action: apt 
name=goaccess state=latest 16 | 17 | - action: apt name=xz-utils state=latest 18 | 19 | - template: src=goaccess.conf.j2 dest=/etc/goaccess.conf backup=no 20 | tags: 21 | - config 22 | 23 | - cron: name="goaccess" minute=5 hour=0 24 | user="root" job="zcat -f /var/log/nginx/access.log* | goaccess --no-progress -a > /var/www/report.html" 25 | cron_file=ansible_goaccess 26 | 27 | - cron: name="goaccess-latest" minute=0 28 | user="root" job="goaccess --no-progress -f /var/log/nginx/access.log -a > /var/www/report-lastlog.html" 29 | cron_file=ansible_goaccess-latest 30 | -------------------------------------------------------------------------------- /munin-client/munin-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: spcs 3 | sudo: True 4 | user: remy 5 | connection: ssh # or paramiko 6 | 7 | vars: 8 | date: $LOOKUP(date) 9 | pkg_mgr: ${ansible_pkg_mgr} 10 | pbname: $inventory_hostname 11 | listenaddr: '*' 12 | listenport: '4949' 13 | allowed_addresses: 14 | - '^127\.0\.0\.1$' 15 | - '^91\.227\.249\.12$' 16 | - '^91\.227\.249\.13$' 17 | - '^91\.227\.249\.20$' 18 | 19 | tasks: 20 | 21 | - name: Install munin-node 22 | action: $pkg_mgr name=$item state=latest 23 | with_items: 24 | - munin-node 25 | 26 | - name: Set munin-node config file 27 | template: src=config/munin-node.conf.in dest=/etc/munin/munin-node.conf owner=root group=root mode=0755 28 | notify: restart munin-node 29 | 30 | - name: Set munin-plugin config file 31 | template: src=config/plugin-conf.in dest=/etc/munin/plugin-conf.d/munin-node owner=root group=root mode=0755 32 | notify: restart munin-node 33 | 34 | handlers: 35 | - name: restart munin-node 36 | service: name=munin-node state=restarted enabled=yes 37 | -------------------------------------------------------------------------------- /collectd/config/restartd.conf: -------------------------------------------------------------------------------- 1 | # Restartd configuration file 2 | 
## /etc/restartd.conf generated for {{ pbname }} by Ansible 3 | 4 | # Format: 5 | # 6 | # 7 | # 8 | # process_name: the name of the process which is just for logging 9 | # as it does not affect for the regexp 10 | # 11 | # regexp: the POSIX regular expression for the command line of the 12 | # process 13 | # 14 | # action_if_not_running: a script or program name to execute if the 15 | # regexp does not match the full process command line 16 | # 17 | # action_if_running: a script or program name to execute if the regexp 18 | # matches the full process command line 19 | 20 | # Example: 21 | # 22 | # restartd ".*restartd" "/bin/echo 'It is not running!' >/tmp/restartd.out" "/bin/echo 'It is running!' >/tmp/restartd.out" 23 | 24 | collectd-web ".*runserver.py" "su {{ user }} -l -c 'pushd /home/{{ user }}/collectd-web/ && /usr/bin/python /home/{{ user }}/collectd-web/runserver.py' >> /var/log/{{ user }}-collectd-server.log" "/bin/echo 'collectd-server running' >> /var/log/{{ user }}-collectd-server.log" 25 | 26 | -------------------------------------------------------------------------------- /collectd/config/collectd-client.conf: -------------------------------------------------------------------------------- 1 | ## /etc/collectd/collectd.conf generated for {{ pbname }} by Ansible 2 | ## Config Type: CollectD Client 3 | 4 | 5 | Hostname {{ pbname }} 6 | FQDNLookup false 7 | Interval 30 8 | ReadThreads 1 9 | LoadPlugin syslog 10 | 11 | LogLevel info 12 | 13 | 14 | LoadPlugin cpu 15 | LoadPlugin df 16 | LoadPlugin disk 17 | LoadPlugin entropy 18 | LoadPlugin interface 19 | LoadPlugin irq 20 | LoadPlugin load 21 | LoadPlugin memory 22 | LoadPlugin processes 23 | LoadPlugin rrdtool 24 | LoadPlugin swap 25 | LoadPlugin users 26 | LoadPlugin network 27 | 28 | ## Extra Plugins 29 | {% for plugin in extra_plugins %} 30 | LoadPlugin {{ plugin }} 31 | {% endfor %} 32 | 33 | ## CollectD Servers 34 | 35 | {% for address in collectd_servers %} 36 | Server "{{ address }}" "{{ 
collectd_port }}" 37 | {% endfor %} 38 | SecurityLevel None 39 | 40 | 41 | 42 | 43 | DataDir "/var/lib/collectd/rrd" 44 | 45 | 46 | Include "/etc/collectd/filters.conf" 47 | Include "/etc/collectd/plugins.conf" 48 | Include "/etc/collectd/thresholds.conf" 49 | -------------------------------------------------------------------------------- /oh-my-zsh/ohmyzsh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Install Oh-my-zsh 3 | 4 | - hosts: spcs 5 | user: remy 6 | sudo: True 7 | connection: ssh # or paramiko 8 | 9 | vars: 10 | user: remy 11 | pbname: $inventory_hostname 12 | distro: ${ansible_distribution} 13 | pkg_mgr: ${ansible_pkg_mgr} 14 | zsh_theme: steeef 15 | 16 | tasks: 17 | - name: Install git and zsh 18 | apt: name=$item state=latest update_cache=yes 19 | with_items: 20 | - git 21 | - zsh 22 | - git-core 23 | 24 | - name: Clone oh-my-zsh repo 25 | git: repo=https://github.com/robbyrussell/oh-my-zsh.git dest=/home/$user/.oh-my-zsh 26 | 27 | - name: deploy .zshrc 28 | template: src=files/zshrc.in dest=/home/$user/conf/zshrc owner=$user 29 | 30 | - name: remove standard zshrc 31 | file: path=/home/$user/.zshrc state=absent 32 | 33 | - name: Create conf folder in home directory 34 | file: path=/home/$user/conf/ state=directory owner=$user 35 | 36 | - name: symlink zshrc 37 | file: path=/home/$user/.zshrc src=/home/$user/conf/zshrc state=link owner=$user 38 | 39 | - name: Set zsh as default shell 40 | user: name=$user shell=/bin/zsh 41 | -------------------------------------------------------------------------------- /openstack-example/templates/nginx-lb.conf.j2: -------------------------------------------------------------------------------- 1 | upstream backend { 2 | {% for host in groups['app'] %} 3 | server {{ hostvars[host]['ansible_eth0']['ipv4']['address'] }}:80 max_fails=3 fail_timeout=5s; 4 | {% endfor %} 5 | } 6 | 7 | proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=CACHE:10m inactive=24h 
max_size=1g; 8 | 9 | server { 10 | listen 80 default_server; 11 | server_name {{ website_name }}; 12 | 13 | access_log /var/log/nginx/{{ website_name }}.access.log; 14 | error_log /var/log/nginx/{{ website_name }}.error.log; 15 | root /usr/share/nginx/html; 16 | 17 | location / { 18 | proxy_pass http://backend; 19 | proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504; 20 | proxy_redirect off; 21 | proxy_cache CACHE; 22 | proxy_cache_valid 200 1d; 23 | proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504; 24 | proxy_set_header Host $host; 25 | proxy_set_header X-Real-IP $remote_addr; 26 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 27 | } 28 | } -------------------------------------------------------------------------------- /collectd/config/collectd-server.conf: -------------------------------------------------------------------------------- 1 | ## /etc/collectd/collectd.conf generated for {{ pbname }} by Ansible 2 | ## Config Type: CollectD Server 3 | 4 | Hostname {{ pbname }} 5 | FQDNLookup false 6 | Interval 30 7 | ReadThreads 1 8 | LoadPlugin syslog 9 | 10 | LogLevel info 11 | 12 | 13 | LoadPlugin cpu 14 | LoadPlugin df 15 | LoadPlugin disk 16 | LoadPlugin entropy 17 | LoadPlugin interface 18 | LoadPlugin irq 19 | LoadPlugin load 20 | LoadPlugin memory 21 | LoadPlugin processes 22 | LoadPlugin rrdtool 23 | LoadPlugin swap 24 | LoadPlugin users 25 | LoadPlugin network 26 | 27 | ## Server config 28 | 29 | Listen "{{ ipaddr }}" "{{ collectd_port }}" 30 | Listen "{{ ip6addr }}" "{{ collectd_port }}" 31 | ReportStats true 32 | SecurityLevel None 33 | {% for address in collectd_servers %} 34 | Server "{{ address }}" "{{ collectd_port }}" 35 | {% endfor %} 36 | 37 | 38 | ## Extra Plugins 39 | {% for plugin in extra_plugins %} 40 | LoadPlugin {{ plugin }} 41 | {% endfor %} 42 | 43 | 44 | DataDir "/var/lib/collectd/rrd" 45 | 46 | 47 | Include "/etc/collectd/filters.conf" 48 | 
Include "/etc/collectd/plugins.conf" 49 | Include "/etc/collectd/thresholds.conf" 50 | -------------------------------------------------------------------------------- /openstack-example/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create Instances 3 | hosts: 127.0.0.1 4 | connection: local 5 | vars_files: 6 | - "vars/main.yml" 7 | tasks: 8 | - include: tasks/create-instances.yml 9 | 10 | # Both lbs group and app group but only execute on lbs group. Otherwise 11 | # facts about app group are not gathered and config fails 12 | - name: Configure LoadBalancers 13 | hosts: lbs:app 14 | vars_files: 15 | - "vars/main.yml" 16 | user: root 17 | tasks: 18 | - include: tasks/configure-lbs.yml 19 | when: '"{{ inventory_hostname }}" in "{{ groups.lbs }}"' 20 | - include: tasks/keepalived.yml 21 | when: '"{{ inventory_hostname }}" in "{{ groups.lbs }}"' 22 | handlers: 23 | - include: handlers/main.yml 24 | 25 | - name: Configure Databases 26 | hosts: dbs 27 | vars_files: 28 | - "vars/main.yml" 29 | user: root 30 | tasks: 31 | - include: tasks/configure-dbs.yml 32 | - include: tasks/keepalived.yml 33 | handlers: 34 | - include: handlers/main.yml 35 | 36 | - name: Configure App Server 37 | hosts: app 38 | vars_files: 39 | - "vars/main.yml" 40 | user: root 41 | tasks: 42 | - include: tasks/configure-gluster-app.yml 43 | - include: tasks/configure-app.yml 44 | handlers: 45 | - include: handlers/main.yml 46 | 47 | 48 | -------------------------------------------------------------------------------- /sudo/sudo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: spcs 3 | sudo: True 4 | user: remy 5 | connection: ssh # or paramiko 6 | 7 | vars: 8 | date: $PIPE(date +%s) 9 | distro: ${ansible_distribution} 10 | pkg_mgr: ${ansible_pkg_mgr} 11 | pbname: $inventory_hostname 12 | 13 | tasks: 14 | - apt: name=$item state=latest 15 | with_items: 16 | - sudo 17 | 18 | - 
name: Copy sudoers file for safety 19 | command: cp -f /etc/sudoers /etc/sudoers.tmp 20 | 21 | - name: Create sudoers file backup 22 | command: cp -f /etc/sudoers /etc/sudoers.${date}.bak 23 | 24 | - name: Create admins group 25 | group: name=admins system=yes state=present 26 | 27 | - name: make sure we can sudo as admin group 28 | lineinfile: dest=/etc/sudoers.tmp state=present regexp='^%admin ALL\=\(ALL\) ALL' line='%admin ALL=(ALL) ALL' 29 | 30 | - name: also make sure ssh-agent works via sudo 31 | lineinfile: dest=/etc/sudoers.tmp state=present regexp='^Defaults env_keep\+\=SSH_AUTH_SOCK' line='Defaults env_keep+=SSH_AUTH_SOCK' 32 | 33 | - name: and mail bad passwords 34 | lineinfile: dest=/etc/sudoers.tmp state=present regexp='^Defaults mail_badpass' line='Defaults mail_badpass' 35 | 36 | - name: Final sudoers file check 37 | shell: visudo -q -c -f /etc/sudoers.tmp && cp -f /etc/sudoers.tmp /etc/sudoers 38 | -------------------------------------------------------------------------------- /collectd/config/iptables.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## Firewall all the things! 3 | {{ iptables_path }} -F 4 | ## But allow what is needed. 
5 | {{ iptables_path }} -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 6 | 7 | ## Host specific rules 8 | 9 | {% for host in groups['collectd-clients'] %} 10 | {% if hostvars[host]['ansible_default_ipv4']['address'] %} 11 | ## Host {{ hostvars[host]['inventory_hostname'] }} 12 | {{ iptables_path }} -A INPUT -p udp -s {{ hostvars[host]['ansible_default_ipv4']['address'] }} --dport {{ collectd_port }} -j ACCEPT 13 | {% endif %} 14 | {% if hostvars[host]['ansible_default_ipv6']['address'] %} 15 | ## Host {{ hostvars[host]['inventory_hostname'] }} 16 | {{ ip6tables_path }} -A INPUT -p udp -s {{ hostvars[host]['ansible_default_ipv6']['address'] }} --dport {{ collectd_port }} -j ACCEPT 17 | {% endif %} 18 | {% endfor %} 19 | 20 | 21 | {{ ip6tables_path }} -A INPUT -p udp -m udp --dport {{ collectd_port }} -m limit --limit 5/sec --limit-burst 8 -j LOG --log-prefix "collectd_port " 22 | {{ iptables_path }} -A INPUT -p udp -m udp --dport {{ collectd_port }} -m limit --limit 5/sec --limit-burst 8 -j LOG --log-prefix "collectd_port " 23 | ## Final deny rule 24 | {{ iptables_path }} -A INPUT -p udp --dport {{ collectd_port }} -j DROP 25 | {{ ip6tables_path }} -A INPUT -p udp --dport {{ collectd_port }} -j DROP -------------------------------------------------------------------------------- /openstack-example/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [client] 2 | port = 3306 3 | socket = /var/run/mysqld/mysqld.sock 4 | 5 | [mysqld_safe] 6 | socket = /var/run/mysqld/mysqld.sock 7 | nice = 0 8 | 9 | [mysqld] 10 | user = mysql 11 | pid-file = /var/run/mysqld/mysqld.pid 12 | socket = /var/run/mysqld/mysqld.sock 13 | port = 3306 14 | basedir = /usr 15 | datadir = /var/lib/mysql 16 | tmpdir = /tmp 17 | 18 | lc-messages-dir = /usr/share/mysql 19 | skip-external-locking 20 | bind-address = 0.0.0.0 21 | key_buffer = 16M 22 | max_allowed_packet = 16M 23 | thread_stack = 192K 24 | thread_cache_size = 8 25 | 
myisam-recover = BACKUP 26 | query_cache_limit = 1M 27 | query_cache_size = 16M 28 | log_error = /var/log/mysql/error.log 29 | log_bin = mysql-bin 30 | binlog_do_db = {{ mysql_user }} 31 | expire_logs_days = 10 32 | max_binlog_size = 100M 33 | auto_increment_offset = 1 34 | auto_increment_increment = 2 35 | server_id = {{ sql_server_id }} 36 | 37 | [mysqldump] 38 | quick 39 | quote-names 40 | max_allowed_packet = 16M 41 | 42 | [mysql] 43 | [isamchk] 44 | key_buffer = 16M 45 | 46 | !includedir /etc/mysql/conf.d/ 47 | -------------------------------------------------------------------------------- /goaccess/templates/goaccess.conf.j2: -------------------------------------------------------------------------------- 1 | config-dialog false 2 | color-scheme 1 3 | hl-header true 4 | no-color false 5 | no-column-names false 6 | no-progress false 7 | with-mouse false 8 | no-csv-summary false 9 | all-static-files false 10 | static-file .css 11 | static-file .CSS 12 | static-file .dae 13 | static-file .DAE 14 | static-file .eot 15 | static-file .EOT 16 | static-file .gif 17 | static-file .GIF 18 | static-file .ico 19 | static-file .ICO 20 | static-file .jpeg 21 | static-file .JPEG 22 | static-file .jpg 23 | static-file .JPG 24 | static-file .js 25 | static-file .JS 26 | static-file .map 27 | static-file .MAP 28 | static-file .mp3 29 | static-file .MP3 30 | static-file .pdf 31 | static-file .PDF 32 | static-file .png 33 | static-file .PNG 34 | static-file .svg 35 | static-file .SVG 36 | static-file .swf 37 | static-file .SWF 38 | static-file .ttf 39 | static-file .TTF 40 | static-file .txt 41 | static-file .TXT 42 | static-file .woff 43 | static-file .WOFF 44 | agent-list false 45 | http-method true 46 | http-protocol true 47 | no-query-string false 48 | no-term-resolver false 49 | real-os true 50 | with-output-resolver false 51 | 444-as-404 false 52 | 4xx-to-unique-count false 53 | double-decode false 54 | ignore-crawlers false 55 | ignore-panel REFERRERS 56 | ignore-panel 
KEYPHRASES 57 | time-format %T 58 | date-format %d/%b/%Y 59 | log-format %h %^[%d:%t %^] "%r" %s %b "%R" "%u" 60 | -------------------------------------------------------------------------------- /openstack-example/tasks/configure-gluster-app.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # boot bug in standard 14.04 packages: https://bugs.launchpad.net/ubuntu/+source/glusterfs/+bug/1268064 3 | - apt_repository: 4 | repo: 'ppa:semiosis/ubuntu-glusterfs-3.4' 5 | state: present 6 | update_cache: yes 7 | 8 | - apt: 9 | name: "{{ item }}" 10 | state: installed 11 | update_cache: yes 12 | with_items: 13 | - glusterfs-server 14 | - glusterfs-client 15 | 16 | - file: 17 | path: "{{ gluster_brick_dir }}" 18 | state: directory 19 | 20 | - shell: "gluster peer probe {{ item }}" 21 | with_items: 22 | - "{{ groups.app }}" 23 | 24 | - shell: 'echo {{ groups.app }} | sed -e "s/\]//g" -e "s/, u/, /g" -e "s/\[u//g" -e "s%,%:{{ gluster_brick_dir }} %g; s%$%:{{ gluster_brick_dir }}%"' 25 | register: gluster_bricks 26 | connection: local 27 | 28 | - shell: 'gluster volume info {{ gluster_volume }} || 29 | gluster volume create {{ gluster_volume }} transport tcp replica 2 30 | {{ gluster_bricks.stdout }} force' 31 | when: '"{{ inventory_hostname }}" == "{{ groups.app[0] }}"' 32 | 33 | - wait_for: 34 | delay: 15 35 | timeout: 15 36 | 37 | - shell: 'gluster volume info {{ gluster_volume }} | grep "Status: Started" || 38 | gluster volume start {{ gluster_volume }}' 39 | 40 | - file: 41 | path: "/var/www/html" 42 | state: directory 43 | 44 | - mount: 45 | name: /var/www/html 46 | fstype: glusterfs 47 | src: "{{ groups.app[0] }}:{{ gluster_volume }}" 48 | state: mounted 49 | -------------------------------------------------------------------------------- /tor/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - template: src=tor.gpg.key.j2 dest=/root/tor.gpg.key 3 | register: gpgkey 
4 | when: "ansible_os_family == 'Debian'" 5 | 6 | - command: "apt-key add /root/tor.gpg.key" 7 | when: gpgkey.changed 8 | when: "ansible_os_family == 'Debian'" 9 | 10 | - template: src=tor.list.j2 dest=/etc/apt/sources.list.d/tor.list 11 | when: "ansible_os_family == 'Debian'" 12 | 13 | - file: path=/etc/tor state=directory 14 | 15 | - template: src=notice.html.j2 dest=/etc/tor/notice.html 16 | 17 | - template: src=iptables-save.j2 dest=/etc/iptables.save 18 | when: "mirror_dirport_to_80 == 'true'" 19 | register: iptables 20 | 21 | - shell: "/sbin/iptables-restore < /etc/iptables.save" 22 | when: "mirror_dirport_to_80 == 'true'" 23 | when: iptables.changed 24 | 25 | - template: src=rc.local.j2 dest=/etc/rc.local mode=0755 26 | when: "ansible_os_family == 'Debian'" 27 | 28 | - apt: name={{ item }} state=latest update_cache=yes 29 | with_items: 30 | - deb.torproject.org-keyring 31 | - haveged 32 | - tor 33 | when: "ansible_os_family == 'Debian'" 34 | 35 | - pacman: name={{ item }} state=present update_cache=yes 36 | with_items: 37 | - haveged 38 | - tor 39 | when: "ansible_os_family == 'Archlinux'" 40 | 41 | - template: src=torrc.j2 dest=/etc/tor/torrc 42 | register: torrc 43 | 44 | - service: name=tor enabled=true 45 | 46 | - service: name=tor state=restarted 47 | when: torrc.changed 48 | 49 | - command: 'find /var/lib/tor -name fingerprint -exec cut -d" " -f 2 {} \;' 50 | register: fingerprint 51 | when: torrc.changed 52 | 53 | - debug: var=fingerprint.stdout 54 | when: torrc.changed 55 | 56 | - set_fact: tor_fingerprint={{ fingerprint.stdout }} 57 | when: torrc.changed 58 | 59 | -------------------------------------------------------------------------------- /oh-my-zsh/files/zshrc.in: -------------------------------------------------------------------------------- 1 | # Path to your oh-my-zsh configuration. 2 | ZSH=$HOME/.oh-my-zsh 3 | 4 | # For Solarized 5 | export TERM="xterm-256color" 6 | 7 | # Set name of the theme to load. 
8 | # Look in ~/.oh-my-zsh/themes/ 9 | # Optionally, if you set this to "random", it'll load a random theme each 10 | # time that oh-my-zsh is loaded. 11 | # ZSH_THEME="steeef" 12 | ZSH_THEME="{{ zsh_theme }}" 13 | 14 | # Example aliases 15 | # alias zshconfig="mate ~/.zshrc" 16 | # alias ohmyzsh="mate ~/.oh-my-zsh" 17 | 18 | # Set to this to use case-sensitive completion 19 | # CASE_SENSITIVE="true" 20 | 21 | # Comment this out to disable bi-weekly auto-update checks 22 | DISABLE_AUTO_UPDATE="true" 23 | 24 | # Uncomment to change how many often would you like to wait before auto-updates occur? (in days) 25 | # export UPDATE_ZSH_DAYS=13 26 | 27 | # Uncomment following line if you want to disable colors in ls 28 | # DISABLE_LS_COLORS="true" 29 | 30 | # Uncomment following line if you want to disable autosetting terminal title. 31 | # DISABLE_AUTO_TITLE="true" 32 | 33 | # Uncomment following line if you want red dots to be displayed while waiting for completion 34 | # COMPLETION_WAITING_DOTS="true" 35 | 36 | # Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*) 37 | # Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/ 38 | # Example format: plugins=(rails git textmate ruby lighthouse) 39 | plugins=(git cp command-not-found git-extras gnu-utils history pip python ruby screen ssh-agent svn) 40 | 41 | source $ZSH/oh-my-zsh.sh 42 | 43 | # Customize to your needs... 
44 | export PATH=/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games 45 | 46 | PATH=$PATH:$HOME/.rvm/bin # Add RVM to PATH for scripting 47 | -------------------------------------------------------------------------------- /collectd/config/plugins.conf: -------------------------------------------------------------------------------- 1 | ## /etc/collectd/plugins.conf generated for {{ pbname }} by Ansible 2 | 3 | ## Static Plugins (every host has them) 4 | 5 | ReportByDevice false 6 | 7 | 8 | 9 | ## Dynamic Plugins (loaded by Ansible based on options) 10 | {% if "nginx" in extra_plugins %} 11 | 12 | URL "http://127.0.0.1/nginx_status" 13 | # User "www-user" 14 | # Password "secret" 15 | # VerifyPeer false 16 | # VerifyHost false 17 | # CACert "/etc/ssl/ca.crt" 18 | 19 | {% endif %} 20 | 21 | 22 | {% if "ntpd" in extra_plugins %} 23 | 24 | Host "localhost" 25 | Port 123 26 | ReverseLookups false 27 | 28 | {% endif %} 29 | 30 | {% if "ping" in extra_plugins %} 31 | 32 | Host "google.com" 33 | 34 | {% endif %} 35 | 36 | 37 | {% if "rrdcached" in extra_plugins %} 38 | 39 | DaemonAddress "unix:/var/run/rrdcached.sock" 40 | DataDir "/var/lib/rrdcached/db/collectd" 41 | CreateFiles true 42 | CollectStatistics true 43 | 44 | {% endif %} 45 | 46 | {% if "sensors" in extra_plugins %} 47 | 48 | SensorConfigFile "/etc/sensors3.conf" 49 | Sensor "it8712-isa-0290/temperature-temp1" 50 | Sensor "it8712-isa-0290/fanspeed-fan3" 51 | Sensor "it8712-isa-0290/voltage-in8" 52 | IgnoreSelected false 53 | 54 | {% endif %} 55 | 56 | {% if "graphite" in extra_plugins %} 57 | 58 | 59 | Host "{{ graphite_host }}" 60 | Port "2003" 61 | Prefix "collectd" 62 | Postfix "collectd" 63 | StoreRates false 64 | AlwaysAppendDS false 65 | EscapeCharacter "_" 66 | 67 | 68 | {% endif %} 69 | 70 | -------------------------------------------------------------------------------- /munin-client/config/plugin-conf.in: 
-------------------------------------------------------------------------------- 1 | ## munin-node config file for host {{ pbname }} 2 | ## Autogenerated by Ansible for raymii.org 3 | 4 | [http-status] 5 | user root 6 | 7 | [amavis] 8 | group adm 9 | env.MUNIN_MKTEMP /bin/mktemp -p /tmp/ $1 10 | env.amavislog /var/log/mail.info 11 | 12 | [apt] 13 | user root 14 | 15 | [courier_mta_mailqueue] 16 | group daemon 17 | 18 | [courier_mta_mailstats] 19 | group adm 20 | 21 | [courier_mta_mailvolume] 22 | group adm 23 | 24 | [cps*] 25 | user root 26 | 27 | [df*] 28 | env.exclude none unknown iso9660 squashfs udf romfs ramfs debugfs 29 | env.warning 92 30 | env.critical 98 31 | 32 | [exim_mailqueue] 33 | group adm, (Debian-exim) 34 | 35 | [exim_mailstats] 36 | group adm, (Debian-exim) 37 | env.logdir /var/log/exim4/ 38 | env.logname mainlog 39 | 40 | [fw_conntrack] 41 | user root 42 | 43 | [fw_forwarded_local] 44 | user root 45 | 46 | [hddtemp_smartctl] 47 | user root 48 | 49 | [hddtemp2] 50 | user root 51 | 52 | [if_*] 53 | user root 54 | 55 | [if_err_*] 56 | user nobody 57 | 58 | [ip_*] 59 | user root 60 | 61 | [ipmi_*] 62 | user root 63 | 64 | [mysql*] 65 | user root 66 | env.mysqlopts --defaults-file=/etc/mysql/debian.cnf 67 | env.mysqluser debian-sys-maint 68 | env.mysqlconnection DBI:mysql:mysql;mysql_read_default_file=/etc/mysql/debian.cnf 69 | 70 | [postfix_mailqueue] 71 | user postfix 72 | 73 | [postfix_mailstats] 74 | group adm 75 | 76 | [postfix_mailvolume] 77 | group adm 78 | env.logfile mail.log 79 | 80 | [smart_*] 81 | user root 82 | 83 | [vlan*] 84 | user root 85 | 86 | [ejabberd*] 87 | user ejabberd 88 | env.statuses available away chat xa 89 | env.days 1 7 30 90 | 91 | [dhcpd3] 92 | user root 93 | env.leasefile /var/lib/dhcp3/dhcpd.leases 94 | env.configfile /etc/dhcp3/dhcpd.conf 95 | 96 | [jmx_*] 97 | env.ip 127.0.0.1 98 | env.port 5400 99 | 100 | [samba] 101 | user root 102 | 103 | [munin_stats] 104 | user munin 105 | group munin 106 | 107 | [postgres_*] 
108 | user postgres 109 | env.PGUSER postgres 110 | env.PGPORT 5432 111 | 112 | [fail2ban] 113 | user root -------------------------------------------------------------------------------- /lighttpd-nodes/config/lighttpd.conf.in: -------------------------------------------------------------------------------- 1 | ## Generated by Ansible for host: {{ pbname }} 2 | 3 | server.modules = ( 4 | "mod_access", 5 | "mod_alias", 6 | "mod_accesslog", 7 | "mod_compress", 8 | "mod_status", 9 | "mod_rewrite", 10 | "mod_auth", 11 | "mod_expire", 12 | "mod_evasive" 13 | ) 14 | 15 | server.upload-dirs = ( "/var/cache/lighttpd/uploads" ) 16 | server.errorlog = "/var/log/lighttpd/error.log" 17 | server.pid-file = "/var/run/lighttpd.pid" 18 | server.username = "{{ wuser }}" 19 | server.groupname = "{{ wgroup }}" 20 | 21 | index-file.names = ( "index.php", "index.html", "index.htm", "default.htm", " index.lighttpd.html" ) 22 | 23 | url.access-deny = ( "~", ".inc" ) 24 | 25 | static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" ) 26 | 27 | include_shell "/usr/share/lighttpd/use-ipv6.pl" 28 | 29 | dir-listing.encoding = "utf-8" 30 | server.dir-listing = "{{ dirlist }}" 31 | 32 | compress.cache-dir = "/var/cache/lighttpd/compress/" 33 | compress.filetype = {{ compress }} 34 | 35 | include_shell "/usr/share/lighttpd/create-mime.assign.pl" 36 | include_shell "/usr/share/lighttpd/include-conf-enabled.pl" 37 | 38 | server.tag = "{{ servertag }}" 39 | server.document-root = "{{ wwwroot }}" 40 | server.max-keep-alive-requests = {{ servermaxkeepaliverequests }} 41 | server.max-keep-alive-idle = {{ servermaxkeepaliveidle }} 42 | server.max-read-idle = {{ servermaxreadidle }} 43 | server.max-write-idle = {{ servermaxwriteidle }} 44 | connection.kbytes-per-second = {{ connectionkbytespersecond }} 45 | server.kbytes-per-second = {{ serverkbytespersecond }} 46 | server.follow-symlink = "disable" 47 | 48 | var.confdir = "/srv/ssl/certs" 49 | $SERVER["socket"] == ":443" { 50 | ssl.engine = 
"enable" 51 | ssl.pemfile = var.confdir + "{{ pemfile }}" 52 | ssl.ca-file = var.confdir + "{{ cafile }}" 53 | server.name = var.confdir + "/raymii.org" 54 | server.document-root = "{{ sslwwwroot }}" 55 | } 56 | 57 | url.rewrite-once = ( 58 | {{ raymiirewrite }} 59 | ) 60 | 61 | $HTTP["url"] =~ "/\.git/" { 62 | url.access-deny = ( "" ) 63 | } 64 | 65 | $HTTP["url"] =~ "^/" { 66 | expire.url = ( "" => "{{ contentexpire }}" ) 67 | } 68 | -------------------------------------------------------------------------------- /lighttpd-nodes/lighttpd-nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nodes 3 | sudo: True 4 | user: remy 5 | 6 | vars: 7 | wwwroot: "/srv/http/" 8 | user: "remy" 9 | sslwwwroot: "/srv/https/" 10 | wuser: "www-data" 11 | wgroup: "www-data" 12 | compress: '( "application/x-javascript", "text/css", "text/html", "text/plain" )' 13 | servertag: "lighttpd" 14 | servermaxkeepaliverequests: "80" 15 | servermaxkeepaliveidle: "80" 16 | servermaxreadidle: "80" 17 | servermaxwriteidle: "80" 18 | connectionkbytespersecond: "4096" 19 | serverkbytespersecond: "8128" 20 | serverfollowsymlink: "disable" 21 | cafile: "/ca-bundle.crt" 22 | pemfile: "/cert.pem" 23 | raymiirewrite: 'rewriterule' 24 | contentexpire: 'access plus 48 hours' 25 | dirlist: "enable" 26 | 27 | 28 | 29 | tasks: 30 | - name: Install lighttpd and php 31 | apt: name=$item state=latest 32 | with_items: 33 | - lighttpd 34 | - php5-cgi 35 | 36 | - name: Create ssl certificate dir 37 | file: path=/etc/ssl/cert state=directory 38 | 39 | # - name: copy raymii.org ssl certificate 40 | # copy: src=config/cert.pem dest=/etc/ssl/cert/cert.pem owner=www-data group=www-data mode=0644 41 | 42 | # - name: copy ca-bundle ca chain 43 | # copy: src=config/ca-bundle.crt dest=/etc/ssl/cert/ca-bundle.crt owner=www-data group=www-data mode=0644 44 | 45 | 46 | - name: copy lighttpd slave node config 47 | template: src=config/lighttpd.conf.in 
dest=/etc/lighttpd/lighttpd.conf backup=yes 48 | notify: restart lighttpd 49 | 50 | - name: Fix cgi-pathinfo 51 | action: 'lineinfile name=/etc/php5/cgi/php.ini regexp="^cgi.fix_pathinfo=1" line="cgi.fix_pathinfo=1"' 52 | notify: restart lighttpd 53 | 54 | 55 | - name: Enable lighttpd fastcgi + php module 56 | command: lighttpd-enable-mod fastcgi && lighttpd-enable-mod fastcgi-php 57 | notify: restart lighttpd 58 | 59 | - name: Give users permisisons on /var/www folder 60 | file: path=/var/www/ state=directory owner=$user group=$user mode=755 61 | 62 | 63 | handlers: 64 | - name: restart lighttpd 65 | action: service name=lighttpd state=restarted 66 | -------------------------------------------------------------------------------- /openstack-example/tasks/configure-dbs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - apt: 3 | name="{{ item }}" 4 | state=latest 5 | update_cache=yes 6 | with_items: 7 | - mysql-server 8 | - python-mysqldb 9 | - vim 10 | - git 11 | - ntp 12 | 13 | - template: 14 | src: my.cnf.j2 15 | dest: /etc/mysql/my.cnf 16 | notify: 17 | - restart mysql 18 | 19 | - fetch: 20 | src: /etc/mysql/debian.cnf 21 | flat: yes 22 | dest: "/tmp/my.cnf.{{ ansible_hostname }}" 23 | 24 | - copy: 25 | src: "/tmp/my.cnf.{{ ansible_hostname }}" 26 | dest: /root/.my.cnf 27 | 28 | - mysql_user: 29 | user: "" 30 | state: "absent" 31 | 32 | - mysql_user: 33 | name: "{{ mysql_user }}" 34 | password: "{{ mysql_password }}" 35 | host: "%" 36 | priv: '{{ mysql_user }}.*:ALL' 37 | state: present 38 | 39 | - mysql_db: 40 | name: "{{ mysql_user }}" 41 | state: present 42 | 43 | - mysql_user: 44 | name: "replicator" 45 | host: "%" 46 | password: "{{ mysql_password }}" 47 | priv: "*.*:REPLICATION SLAVE" 48 | state: present 49 | notify: 50 | - restart mysql 51 | 52 | - stat: path=/etc/mysql/ansible.repl 53 | register: check_sql_path 54 | 55 | - mysql_replication: 56 | mode: changemaster 57 | master_host: "{{ groups.dbs[1] }}" 
58 | master_user: replicator 59 | master_password: "{{ mysql_password }}" 60 | when: check_sql_path.stat.exists == false and '{{ inventory_hostname }}' == '{{ groups.dbs[0] }}' 61 | register: sqlresult0 62 | notify: 63 | - restart mysql 64 | 65 | - mysql_replication: 66 | mode: changemaster 67 | master_host: "{{ groups.dbs[0] }}" 68 | master_user: replicator 69 | master_password: "{{ mysql_password }}" 70 | when: check_sql_path.stat.exists == false and '{{ inventory_hostname }}' == '{{ groups.dbs[1] }}' 71 | register: sqlresult1 72 | notify: 73 | - restart mysql 74 | 75 | - command: touch /etc/mysql/repl.ansible 76 | when: check_sql_path.stat.exists == false and '{{ inventory_hostname }}' == '{{ groups.dbs[1] }}' 77 | 78 | - command: touch /etc/mysql/repl.ansible 79 | when: check_sql_path.stat.exists == false and '{{ inventory_hostname }}' == '{{ groups.dbs[1] }}' 80 | 81 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Ansible 2 | ======= 3 | 4 | My ansible playbooks. 5 | 6 | * lighttpd-nodes is used for installing and configuring lighttpd. 7 | * raymon is used to deploy my little status monitoring application server: [Ray-Mon](https://raymii.org/cms/p_Bash_PHP_Server_Status_Monitor) 8 | * start is for the app by Bas ten Feld: [start](https://github.com/develup/start) 9 | * munin-client is used to install munin client, it has the [hostedmunin.com](http://hostedmunin.com) servers by default, but you can easily define your own. 10 | * vnstat is used to install and setup vnstat on debian, including initializing and config file setup. 11 | * oh-my-zsh is to set up zsh and the [oh-my-zsh by robbyrussel](https://github.com/robbyrussell/oh-my-zsh) config. 
12 | * collectd is to set up collectd servers and collectd clients, using the `collectd-servers` and `collectd-clients` groups, [see tutorial on Raymii.org](https://raymii.org/s/tutorials/Collectd_server_setup_tutorial_with_web_frontend.html) 13 | * sudo is to set up sudo as I like it, with an admin group and such. 14 | * vpn is used to set up an IPSEC/L2TP VPN server with local user (PAM/UNIX) authentication [as described here](https://raymii.org/s/tutorials/IPSEC_L2TP_vpn_with_Ubuntu_12.04.html) 15 | * tor is used to set up a tor relay node 16 | * openstack-example is used for the following tutorial: []() 17 | 18 | Playbooks are here merely for example for others and reference. 19 | 20 | **Note: Playbooks may be outdated. Pull requests welcome** 21 | 22 | 23 | # License 24 | 25 | Unless otherwise stated: 26 | 27 | Copyright (C) 2014 Remy van Elst 28 | 29 | This program is free software: you can redistribute it and/or modify 30 | it under the terms of the GNU General Public License as published by 31 | the Free Software Foundation, either version 3 of the License, or 32 | (at your option) any later version. 33 | 34 | This program is distributed in the hope that it will be useful, 35 | but WITHOUT ANY WARRANTY; without even the implied warranty of 36 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 37 | GNU General Public License for more details. 38 | 39 | You should have received a copy of the GNU General Public License 40 | along with this program. If not, see . 
41 | -------------------------------------------------------------------------------- /vpn/vpn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## IPSEC/L2TP VPN: https://raymii.org/s/tutorials/IPSEC_L2TP_vpn_with_Ubuntu_12.04.html 3 | 4 | - hosts: vpn 5 | sudo: True 6 | user: remy 7 | connection: ssh # or paramiko 8 | 9 | vars: 10 | date: $PIPE(date +%s) 11 | distro: ${ansible_distribution} 12 | pkg_mgr: ${ansible_pkg_mgr} 13 | pbname: ${inventory_hostname} 14 | ipv4_address: ${ansible_all_ipv4_addresses[0]} 15 | shared_secret: "raymiiorg" 16 | 17 | 18 | tasks: 19 | - name: Install required packages with apt 20 | apt: name={{item}} state=latest update_cache=yes 21 | with_items: 22 | - openswan 23 | - xl2tpd 24 | - ppp 25 | - lsof 26 | - iptables 27 | 28 | - name: Set IPtables routing rule 29 | command: /sbin/iptables --table nat --append POSTROUTING --jump MASQUERADE 30 | 31 | - name: set /tmp/vpn.sh 32 | template: src=config/vpn.sh.in dest=//tmp/vpn.sh owner=root mode=0755 33 | 34 | - name: run specific interface sysctl values 35 | command: /tmp/vpn.sh 36 | notify: restart xl2tpd 37 | 38 | - name: set sysctl rule net.ipv4.ip_forward to 1 39 | sysctl: name=net.ipv4.ip_forward value=1 state=present 40 | 41 | - name: set sysctl rule net.ipv4.conf.all.accept_redirects to 0 42 | sysctl: name=net.ipv4.conf.all.accept_redirects value=0 state=present 43 | 44 | - name: set sysctl rule net.ipv4.conf.all.send_redirects to 0 45 | sysctl: name=net.ipv4.conf.all.send_redirects value=0 state=present 46 | 47 | - name: set sysctl rule net.ipv4.ip_forward to 1 48 | sysctl: name=net.ipv4.ip_forward value=1 state=present 49 | 50 | - name: set /etc/rc.local 51 | template: src=config/etc-rc.local.in dest=/etc/rc.local owner=root 52 | 53 | - name: set /etc/ipsec.conf 54 | template: src=config/openswan.conf.in dest=/etc/ipsec.conf owner=root 55 | notify: restart openswan 56 | 57 | - name: set /etc/ipsec.secrets 58 | template: 
src=config/etc-ipsec.secrets.conf.in dest=/etc/ipsec.secrets owner=root 59 | tags: psk 60 | notify: restart openswan 61 | 62 | - name: set /etc/xl2tpd/xl2tpd.conf 63 | template: src=config/xl2tpd.conf.in dest=/etc/xl2tpd/xl2tpd.conf owner=root 64 | notify: restart xl2tpd 65 | 66 | - name: set /etc/pam.d/ppp 67 | template: src=config/pam-ppp.in dest=/etc/pam.d/ppp owner=root 68 | 69 | - name: set /etc/ppp/pap-secrets 70 | template: src=config/pap-secrets.in dest=/etc/ppp/pap-secrets owner=root 71 | 72 | - name: set /etc/ppp/options.xl2tpd 73 | template: src=config/options.xl2tpd.in dest=/etc/ppp/options.xl2tpd owner=root 74 | notify: restart xl2tpd 75 | 76 | handlers: 77 | - name: restart openswan 78 | service: name=ipsec state=restarted pattern=ipsec 79 | 80 | - name: restart xl2tpd 81 | service: name=xl2tpd state=restarted pattern=xl2tpd 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /vnstat/config/vnstat.conf.in: -------------------------------------------------------------------------------- 1 | ## Generated by Ansible for host: {{ pbname }} 2 | ## Created by Raymii.org 3 | 4 | # default interface 5 | Interface "{{ interface }}" 6 | 7 | # location of the database directory 8 | DatabaseDir "/var/lib/vnstat" 9 | 10 | # locale (LC_ALL) ("-" = use system locale) 11 | Locale "-" 12 | 13 | # on which day should months change 14 | MonthRotate 1 15 | 16 | # date output formats for -d, -m, -t and -w 17 | # see 'man date' for control codes 18 | DayFormat "%x" 19 | MonthFormat "%b '%y" 20 | TopFormat "%x" 21 | 22 | # characters used for visuals 23 | RXCharacter "%" 24 | TXCharacter ":" 25 | RXHourCharacter "r" 26 | TXHourCharacter "t" 27 | 28 | # how units are prefixed when traffic is shown 29 | # 0 = IEC standard prefixes (KiB/MiB/GiB/TiB) 30 | # 1 = old style binary prefixes (KB/MB/GB/TB) 31 | UnitMode 0 32 | 33 | # output style 34 | # 0 = minimal & narrow, 1 = bar column visible 35 | # 2 = same as 
1 except rate in summary and weekly 36 | # 3 = rate column visible 37 | OutputStyle 3 38 | 39 | # used rate unit (0 = bytes, 1 = bits) 40 | RateUnit 1 41 | 42 | # maximum bandwidth (Mbit) for all interfaces, 0 = disable feature 43 | # (unless interface specific limit is given) 44 | MaxBandwidth 0 45 | 46 | # interface specific limits 47 | # example 8Mbit limit for eth0 (remove # to activate): 48 | #MaxBWeth0 8 49 | 50 | # how many seconds should sampling for -tr take by default 51 | Sampletime 5 52 | 53 | # default query mode 54 | # 0 = normal, 1 = days, 2 = months, 3 = top10 55 | # 4 = dumpdb, 5 = short, 6 = weeks, 7 = hours 56 | QueryMode 0 57 | 58 | # filesystem disk space check (1 = enabled, 0 = disabled) 59 | CheckDiskSpace 1 60 | 61 | # database file locking (1 = enabled, 0 = disabled) 62 | UseFileLocking 1 63 | 64 | # how much the boot time can variate between updates (seconds) 65 | BootVariation 15 66 | 67 | # log days without traffic to daily list (1 = enabled, 0 = disabled) 68 | TrafficlessDays 1 69 | 70 | 71 | # vnstatd 72 | ## 73 | 74 | # how often (in seconds) interface data is updated 75 | UpdateInterval 30 76 | 77 | # how often (in seconds) interface status changes are checked 78 | PollInterval 5 79 | 80 | # how often (in minutes) data is saved to file 81 | SaveInterval 5 82 | 83 | # how often (in minutes) data is saved when all interface are offline 84 | OfflineSaveInterval 30 85 | 86 | # force data save when interface status changes (1 = enabled, 0 = disabled) 87 | SaveOnStatusChange 1 88 | 89 | # enable / disable logging (0 = disabled, 1 = logfile, 2 = syslog) 90 | UseLogging 2 91 | 92 | # file used for logging if UseLogging is set to 1 93 | LogFile "/var/log/vnstat.log" 94 | 95 | # file used as daemon pid / lock file 96 | PidFile "/var/run/vnstat.pid" 97 | 98 | 99 | # vnstati 100 | ## 101 | 102 | # title timestamp format 103 | HeaderFormat "%x %H:%M" 104 | 105 | # show hours with rate (1 = enabled, 0 = disabled) 106 | HourlyRate 1 107 | 108 | # 
show rate in summary (1 = enabled, 0 = disabled) 109 | SummaryRate 1 110 | 111 | # layout of summary (1 = with monthly, 0 = without monthly) 112 | SummaryLayout 1 113 | 114 | # transparent background (1 = enabled, 0 = disabled) 115 | TransparentBg 0 116 | 117 | # image colors 118 | CBackground "FFFFFF" 119 | CEdge "AEAEAE" 120 | CHeader "606060" 121 | CHeaderTitle "FFFFFF" 122 | CHeaderDate "FFFFFF" 123 | CText "000000" 124 | CLine "B0B0B0" 125 | CLineL "-" 126 | CRx "92CF00" 127 | CTx "606060" 128 | CRxD "-" 129 | CTxD "-" -------------------------------------------------------------------------------- /openstack-example/templates/wp-config.php.j2: -------------------------------------------------------------------------------- 1 | 96+%);bI.Xh)b'); 52 | define('LOGGED_IN_SALT', ')^a}zI5@.+L`XJW4NCyr]>-w&/AyNYg(QV-;:EE(hLld=@#kn~&Oe$X|n]VMA'); 53 | define('NONCE_SALT', '9xcx,*3XjNAiZ$!.E6Oc3]sM85Gzz=lIZ6F^%oDC!Ssfu;:2V9aG.bo`EPV],P/6'); 54 | 55 | /**#@-*/ 56 | 57 | /** 58 | * WordPress Database Table prefix. 59 | * 60 | * You can have multiple installations in one database if you give each a unique 61 | * prefix. Only numbers, letters, and underscores please! 62 | */ 63 | $table_prefix = 'wp_'; 64 | 65 | /** 66 | * WordPress Localized Language, defaults to English. 67 | * 68 | * Change this to localize WordPress. A corresponding MO file for the chosen 69 | * language must be installed to wp-content/languages. For example, install 70 | * de_DE.mo to wp-content/languages and set WPLANG to 'de_DE' to enable German 71 | * language support. 72 | */ 73 | define('WPLANG', ''); 74 | 75 | /** 76 | * For developers: WordPress debugging mode. 77 | * 78 | * Change this to true to enable the display of notices during development. 79 | * It is strongly recommended that plugin and theme developers use WP_DEBUG 80 | * in their development environments. 81 | */ 82 | define('WP_DEBUG', false); 83 | 84 | /* That's all, stop editing! Happy blogging. 
*/ 85 | 86 | /** Absolute path to the WordPress directory. */ 87 | if ( !defined('ABSPATH') ) 88 | define('ABSPATH', dirname(__FILE__) . '/'); 89 | 90 | /** Sets up WordPress vars and included files. */ 91 | require_once(ABSPATH . 'wp-settings.php'); 92 | -------------------------------------------------------------------------------- /collectd/collectd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: collectd 3 | sudo: True 4 | user: remy 5 | connection: ssh # or paramiko 6 | vars: 7 | interface: ${ansible_default_ipv4.interface} 8 | pkg_mgr: ${ansible_pkg_mgr} 9 | # ipaddr: ${ansible_default_ipv4.address} 10 | # ip6addr: ${ansible_default_ipv6.address} 11 | user: remy 12 | ipaddr: "*" 13 | ip6addr: "*" 14 | pbname: ${inventory_hostname} 15 | iptables_path: "/sbin/iptables" 16 | ip6tables_path: "/sbin/ip6tables" 17 | collectd_port: "25826" 18 | collectd_servers: 19 | - vps5.sparklingclouds.nl 20 | - vps21.sparklingclouds.nl 21 | extra_plugins: 22 | - nginx 23 | - iptables 24 | - uptime 25 | - dns 26 | - ping 27 | 28 | tasks: 29 | - apt: name=$item state=latest 30 | with_items: 31 | - collectd 32 | - librrds-perl 33 | - libconfig-general-perl 34 | - libhtml-parser-perl 35 | - libregexp-common-perl 36 | - liburi-perl 37 | - libjson-perl 38 | - restartd 39 | - python 40 | only_if: "'collectd-servers' in $group_names" 41 | tags: 42 | - packages 43 | 44 | - apt: name=$item state=latest 45 | with_items: 46 | - collectd 47 | only_if: "'collectd-clients' in $group_names" 48 | tags: 49 | - packages 50 | 51 | - action: template src=config/collectd-client.conf dest=/etc/collectd/collectd.conf owner=root group=root 52 | notify: 53 | - restart collectd 54 | only_if: "'collectd-clients' in $group_names" 55 | tags: 56 | - config 57 | 58 | 59 | - template: src=config/collectd-server.conf dest=/etc/collectd/collectd.conf owner=root group=root 60 | only_if: "'collectd-servers' in $group_names" 61 | notify: 62 | - restart 
collectd 63 | tags: 64 | - config 65 | 66 | - template: src=config/plugins.conf dest=/etc/collectd/plugins.conf owner=root group=root 67 | notify: 68 | - restart collectd 69 | tags: 70 | - config 71 | 72 | - template: src=config/collection.conf dest=/etc/collectd/collections.conf owner=root group=root 73 | notify: 74 | - restart collectd 75 | tags: 76 | - config 77 | 78 | - template: src=config/filters.conf dest=/etc/collectd/filters.conf owner=root group=root 79 | notify: 80 | - restart collectd 81 | tags: 82 | - config 83 | 84 | - template: src=config/thresholds.conf dest=/etc/collectd/thresholds.conf owner=root group=root 85 | notify: 86 | - restart collectd 87 | tags: 88 | - config 89 | 90 | - git: repo=git://github.com/RaymiiOrg/collectd-web.git dest=/home/$user/collectd-web/ 91 | only_if: "'collectd-servers' in $group_names" 92 | tags: 93 | - git 94 | 95 | - template: src=config/restartd.conf dest=/etc/restartd.conf owner=root group=root 96 | only_if: "'collectd-servers' in $group_names" 97 | notify: 98 | - restart restartd 99 | tags: 100 | - config 101 | 102 | 103 | - template: src=config/iptables.sh dest=/var/iptables-collectd.sh owner=root group=root mode=0755 104 | only_if: "'collectd-servers' in $group_names" 105 | tags: 106 | - firewall 107 | 108 | - command: /var/iptables-collectd.sh 109 | only_if: "'collectd-servers' in $group_names" 110 | tags: 111 | - firewall 112 | 113 | - command: "killall -9 collectd" 114 | tags: 115 | - config 116 | notify: restart collectd 117 | 118 | 119 | handlers: 120 | - name: restart collectd 121 | action: service name=collectd enabled=yes pattern=collectd state=restarted 122 | 123 | - name: restart restartd 124 | action: service name=restartd enabled=yes pattern=restartd state=restarted -------------------------------------------------------------------------------- /openstack-example/tasks/create-instances.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - nova_compute: 3 
| auth_url: "{{ auth_url }}" 4 | login_username: "{{ login_username }}" 5 | login_password: "{{ login_password }}" 6 | login_tenant_name: "{{ login_tenant_name }}" 7 | security_groups: "built-in-allow-all" 8 | state: present 9 | availability_zone: "NL2" 10 | name: ansible-cluster-lb1 11 | image_id: "{{ image_id }}" 12 | key_name: "{{ keypair_name }}" 13 | wait_for: 500 14 | nics: 15 | - net-id: "{{ private_net }}" 16 | flavor_id: "{{ flavor_id }}" 17 | meta: 18 | hostname: ansible-cluster-lb1 19 | group: ansible 20 | register: openstacklb1 21 | 22 | - add_host: 23 | name: "{{ openstacklb1.private_ip }}" 24 | groupname: lbs 25 | keepalived_lbs_prio: 150 26 | 27 | - nova_compute: 28 | auth_url: "{{ auth_url }}" 29 | login_username: "{{ login_username }}" 30 | login_password: "{{ login_password }}" 31 | login_tenant_name: "{{ login_tenant_name }}" 32 | security_groups: "built-in-allow-all" 33 | state: present 34 | name: ansible-cluster-lb2 35 | availability_zone: "NL1" 36 | image_id: "{{ image_id }}" 37 | key_name: "{{ keypair_name }}" 38 | wait_for: 500 39 | nics: 40 | - net-id: "{{ private_net }}" 41 | flavor_id: "{{ flavor_id }}" 42 | meta: 43 | hostname: ansible-cluster-lb2 44 | group: ansible 45 | register: openstacklb2 46 | 47 | 48 | 49 | - add_host: 50 | name: "{{ openstacklb2.private_ip }}" 51 | groupname: lbs 52 | keepalived_lbs_prio: 100 53 | 54 | 55 | - nova_compute: 56 | auth_url: "{{ auth_url }}" 57 | login_username: "{{ login_username }}" 58 | login_password: "{{ login_password }}" 59 | login_tenant_name: "{{ login_tenant_name }}" 60 | security_groups: "built-in-allow-all" 61 | state: present 62 | name: ansible-cluster-db1 63 | availability_zone: "NL1" 64 | image_id: "{{ image_id }}" 65 | key_name: "{{ keypair_name }}" 66 | wait_for: 500 67 | nics: 68 | - net-id: "{{ private_net }}" 69 | flavor_id: "{{ flavor_id }}" 70 | meta: 71 | hostname: ansible-cluster-db1 72 | group: ansible 73 | register: openstackdb1 74 | 75 | - add_host: 76 | name: "{{ 
openstackdb1.private_ip }}" 77 | groupname: dbs 78 | sql_server_id: 1 79 | keepalived_sql_prio: 150 80 | 81 | 82 | 83 | - nova_compute: 84 | auth_url: "{{ auth_url }}" 85 | login_username: "{{ login_username }}" 86 | login_password: "{{ login_password }}" 87 | login_tenant_name: "{{ login_tenant_name }}" 88 | security_groups: "built-in-allow-all" 89 | state: present 90 | name: ansible-cluster-db2 91 | availability_zone: "NL2" 92 | image_id: "{{ image_id }}" 93 | key_name: "{{ keypair_name }}" 94 | wait_for: 500 95 | nics: 96 | - net-id: "{{ private_net }}" 97 | flavor_id: "{{ flavor_id }}" 98 | meta: 99 | hostname: ansible-cluster-db2 100 | group: ansible 101 | register: openstackdb2 102 | 103 | 104 | - add_host: 105 | name: "{{ openstackdb2.private_ip }}" 106 | groupname: dbs 107 | sql_server_id: 2 108 | keepalived_sql_prio: 100 109 | 110 | 111 | - nova_compute: 112 | auth_url: "{{ auth_url }}" 113 | login_username: "{{ login_username }}" 114 | login_password: "{{ login_password }}" 115 | login_tenant_name: "{{ login_tenant_name }}" 116 | security_groups: "built-in-allow-all" 117 | state: present 118 | name: ansible-cluster-app1 119 | availability_zone: "NL1" 120 | image_id: "{{ image_id }}" 121 | key_name: "{{ keypair_name }}" 122 | wait_for: 500 123 | nics: 124 | - net-id: "{{ private_net }}" 125 | flavor_id: "{{ flavor_id }}" 126 | meta: 127 | hostname: ansible-cluster-app1 128 | group: ansible 129 | register: openstackapp1 130 | 131 | - add_host: 132 | name: "{{ openstackapp1.private_ip }}" 133 | groupname: app 134 | 135 | - nova_compute: 136 | auth_url: "{{ auth_url }}" 137 | login_username: "{{ login_username }}" 138 | login_password: "{{ login_password }}" 139 | login_tenant_name: "{{ login_tenant_name }}" 140 | security_groups: "built-in-allow-all" 141 | state: present 142 | name: ansible-cluster-app2 143 | availability_zone: "NL2" 144 | image_id: "{{ image_id }}" 145 | key_name: "{{ keypair_name }}" 146 | wait_for: 500 147 | nics: 148 | - net-id: "{{ 
private_net }}" 149 | flavor_id: "{{ flavor_id }}" 150 | meta: 151 | hostname: ansible-cluster-app2 152 | group: ansible 153 | register: openstackapp2 154 | 155 | - add_host: 156 | name: "{{ openstackapp2.private_ip }}" 157 | groupname: app 158 | -------------------------------------------------------------------------------- /raymon/config/stat.php: -------------------------------------------------------------------------------- 1 | 'http://example1.org/stat.json', 25 | 'example2.nl.json' => 'http://example2.nl/stat.json', 26 | 'special1.network.json' => 'http://special1.network.eu:8080/stat.json', 27 | 'special2.network.json' => 'https://special2.network.eu/stat.json' 28 | ); 29 | 30 | $pinglist = array( 31 | 'github.com', 32 | 'google.nl', 33 | 'tweakers.net', 34 | 'jupiterbroadcasting.com', 35 | 'lowendtalk.com', 36 | 'lowendbox.com' 37 | ); 38 | 39 | ## Set this to "secure" the history saving. This key has to be given as a parameter to save the history. 40 | $historykey = "8A29691737D"; 41 | 42 | #the below values set the threshold before a value gets shown in bold on the page. 43 | # Max updates available 44 | $maxupdates = "10"; 45 | # Max users concurrently logged in 46 | $maxusers = "3"; 47 | # Max load. 48 | $maxload = "2"; 49 | # Max disk usage (in percent) 50 | $maxdisk = "75"; 51 | # Max RAM usage (in percent) 52 | $maxram = "75"; 53 | 54 | #Set this or your logs will fill up your disk. 
55 | date_default_timezone_set('Europe/Amsterdam'); 56 | 57 | ## END OF CONFIG 58 | 59 | 60 | 61 | function historystat($bestand,$host) { 62 | $files=array(); 63 | if ($handle = opendir('./history/')) { 64 | while (false !== ($entry = readdir($handle))) { 65 | $files[]=$entry; 66 | } 67 | closedir($handle); 68 | } else { 69 | echo "Error: cannot open direcotry './history' for file $bestand from host $host."; 70 | } 71 | rsort($files); 72 | 73 | foreach ($files as $key => $value) { 74 | $filename = explode(".", $value); 75 | $amount = count($filename); 76 | $timestamp = end($filename); 77 | $amount1 = intval($amount)-1; 78 | $bestandnaam = str_replace(",",".",implode(",",array_slice($filename, 0, $amount1))); 79 | if($bestandnaam == $bestand) { 80 | $jsonfile = "./history/" . $value; 81 | if ($jsonopen = file_get_contents($jsonfile)) { 82 | if (json_decode($jsonopen, true)) { 83 | $filedate = date("d.m.Y - H:i:s", $timestamp); 84 | echo "Date:$filedate\n"; 85 | linestat($jsonfile,$host); 86 | } else { 87 | echo "Cannot decode json file $jsonfile."; 88 | } 89 | } else { 90 | echo "Cannot open json file $jsonfile."; 91 | } 92 | } 93 | } 94 | 95 | } 96 | 97 | function shortstat($bestand,$host) { 98 | 99 | echo ""; 100 | linestat($bestand,$host); 101 | echo "

"; 102 | } 103 | 104 | function percentagebar($percentage) { 105 | 106 | $percentage = str_replace("%", "",$percentage); 107 | echo "
".$percentage . "%
"; 108 | echo "
"; 109 | echo "
"; 110 | echo "
"; 111 | } 112 | 113 | function savefile($bestand,$naam){ 114 | 115 | $curdir=getcwd(); 116 | if(!is_dir("{$curdir}/history")){ 117 | mkdir("${curdir}/history") or die("Cannot create history folder. Create it manually and make sure the webserver can write to it."); 118 | } else { 119 | $DATETIME=date('U'); 120 | $DATEHOUR=date('H'); 121 | $DATEINT=date('i'); 122 | $DATESEC=date('s'); 123 | 124 | 125 | if ($DATEINT=="05" || $DATEINT=="12" || $DATEINT=="15" || $DATEINT=="20" || $DATEINT=="25" || $DATEINT=="35" || $DATEINT=="40" || $DATEINT=="45" || $DATEINT=="50" || $DATEINT=="55") { 126 | $local_file=file_get_contents($bestand); 127 | $saved_local_file=file_put_contents("${curdir}/$naam", $local_file); 128 | 129 | } else { 130 | $local_file=file_get_contents($naam); 131 | } 132 | 133 | 134 | } 135 | 136 | } 137 | 138 | function savehistory($naam) { 139 | $curdir=getcwd(); 140 | $DATETIME=date('U'); 141 | if(!is_dir("{$curdir}/history")){ 142 | mkdir("${curdir}/history") or die("Cannot create history folder. Create it manually and make sure the webserver can write to it."); 143 | } 144 | $local_file=file_get_contents($naam); 145 | 146 | if (!file_put_contents("history/${naam}.${DATETIME}", $local_file)) { 147 | echo "File $naam could not be saved in history. Please check directory permissions on directory \'history\'."; 148 | } 149 | 150 | 151 | 152 | } 153 | 154 | function linestat($bestand,$host) { 155 | global $maxusers; 156 | global $maxupdates; 157 | global $maxload; 158 | global $maxdisk; 159 | global $maxram; 160 | 161 | if ($file = file_get_contents($bestand)) { 162 | if ($json_a = json_decode($file, true)) { 163 | $closed=0; 164 | $havestat = 0; 165 | if(is_array($json_a)) { 166 | ?> 167 | 168 | 169 | Uptime 170 | Services 171 | Load 172 | Users 173 | Updates 174 | HDD (T/U/F) 175 | RAM (T/U/F) 176 | NET RX 177 | NET TX 178 | 179 | 180 | 181 | 182 | $status) { 184 | if($status == "running") { 185 | echo '' . $service . ' up.
'; 186 | } elseif ($status == "not running") { 187 | echo '' . $service . ' down.
'; 188 | } 189 | } 190 | ?> 191 | 192 | $maxload) { 194 | echo "".round(floatval(str_replace(",", "",$json_a['Load'])),3).""; 195 | } else { 196 | echo "".round(floatval(str_replace(",", "",$json_a['Load'])),3).""; 197 | } 198 | 199 | if (intval($json_a['Users logged on']) > $maxusers) { 200 | echo "".$json_a['Users logged on'].""; 201 | } else { 202 | echo "".$json_a['Users logged on'].""; 203 | } 204 | 205 | if (intval($json_a['updatesavail']) > $maxupdates) { 206 | echo "".$json_a['updatesavail'].""; 207 | } else { 208 | echo "".$json_a['updatesavail'].""; 209 | } 210 | ?> 211 | 212 | "; 215 | echo "T: " . $json_a['Disk']['total'] . "
"; 216 | 217 | if (intval(str_replace("%", "",$json_a['Disk']['percentage'])) > $maxdisk ) { 218 | echo "U: " . $json_a['Disk']['used'] . "
"; 219 | echo "F: " . $json_a['Disk']['free'] . ""; 220 | } else { 221 | echo "U: " . $json_a['Disk']['used'] . "
"; 222 | echo "F: " . $json_a['Disk']['free']; 223 | } 224 | ?> 225 | 226 | 100) { 237 | $percent = 100; 238 | } 239 | percentagebar(round($percent * $scale)); 240 | echo "
"; 241 | echo "T: " . $json_a['Total RAM'] . " MB
"; 242 | if (intval(str_replace("%", "",round($percent * $scale))) > $maxram ) { 243 | echo "U: " . $used_ram . " MB
"; 244 | echo "F: " . $json_a['Free RAM'] . " MB"; 245 | } else { 246 | echo "U: " . $used_ram . " MB
"; 247 | echo "F: " . $json_a['Free RAM'] . " MB"; 248 | } 249 | ?> 250 | 251 | 1024000) { 259 | $rxmb = round((($rxmb / 1024) / 1024),2); 260 | echo $rxmb . " TB"; 261 | } 262 | 263 | ?> 264 | 265 | 1024000) { 273 | $txmb = round((($txmb / 1024) / 1024),2); 274 | echo $txmb . " TB"; 275 | } 276 | ?> 277 | 278 | 279 | ' . $host . ' DOWN from here. '; } 293 | $tA = microtime(true); 294 | return '' . $host . ' ' . round((($tA - $tB) * 1000), 0).' ms UP'; 295 | } 296 | 297 | function dosomething($bestand,$host,$actie){ 298 | if(!empty($bestand) && !empty($host) && !empty($actie)) { 299 | # this function should be called per item on a foreach loop. 300 | switch ($actie) { 301 | case 'shortstat': 302 | savefile($host,$bestand); 303 | $parsed_host=parse_url($host,PHP_URL_HOST); 304 | shortstat($bestand,$parsed_host); 305 | break; 306 | case 'historystat': 307 | echo ""; 308 | savefile($host,$bestand); 309 | $parsed_host=parse_url($host,PHP_URL_HOST); 310 | historystat($bestand,$parsed_host); 311 | echo "
"; 312 | break; 313 | } 314 | } 315 | } 316 | 317 | if ($_GET["action"] == "save" && $_GET["key"] == "$historykey") { 318 | foreach ($hostlist as $key => $value) { 319 | savehistory($key); 320 | echo "History for: ".$key . " saved.
\n" ; 321 | } 322 | die("History done.
\n"); 323 | 324 | } 325 | 326 | 327 | ?> 328 | 329 | 330 | Stats 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | 359 | 363 | 364 |
365 |
366 | 370 |
371 | Ping monitor:"; 373 | foreach ($pinglist as $key => $value) { 374 | echo ping("$value",80,5) . ", "; 375 | } 376 | ?> 377 |

Server Status

378 | $value) { 380 | $host=parse_url($value,PHP_URL_HOST); 381 | echo "
Host: ${host}
"; 382 | dosomething($key,$value,"shortstat"); 383 | echo "
"; 384 | } 385 | ?> 386 |
387 |
388 | $value) { 390 | $host=parse_url($value,PHP_URL_HOST); 391 | echo "

History for host ${host}

\n"; 392 | echo "
"; 393 | dosomething($key,$value,"historystat"); 394 | echo "
"; 395 | } 396 | ?> 397 |
398 |
399 | 400 | -------------------------------------------------------------------------------- /tor/templates/tor.gpg.key.j2: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.12 (GNU/Linux) 3 | 4 | mQENBEqg7GsBCACsef8koRT8UyZxiv1Irke5nVpte54TDtTl1za1tOKfthmHbs2I 5 | 4DHWG3qrwGayw+6yb5mMFe0h9Ap9IbilA5a1IdRsdDgViyQQ3kvdfoavFHRxvGON 6 | tknIyk5Goa36GMBl84gQceRs/4Zx3kxqCV+JYXE9CmdkpkVrh2K3j5+ysDWfD/kO 7 | dTzwu3WHaAwL8d5MJAGQn2i6bTw4UHytrYemS1DdG/0EThCCyAnPmmb8iBkZlSW8 8 | 6MzVqTrN37yvYWTXk6MwKH50twaX5hzZAlSh9eqRjZLq51DDomO7EumXP90rS5mT 9 | QrS+wiYfGQttoZfbh3wl5ZjejgEjx+qrnOH7ABEBAAG0JmRlYi50b3Jwcm9qZWN0 10 | Lm9yZyBhcmNoaXZlIHNpZ25pbmcga2V5iEYEEBECAAYFAkqqojIACgkQ61qJaiiY 11 | i/WmOgCfTyf3NJ7wHTBckwAeE4MSt5ZtXVsAn0XDq8PWWnk4nK6TlevqK/VoWItF 12 | iEYEEBECAAYFAkqsYDUACgkQO50JPzGwl0voJwCcCSokiJSNY+yIr3nBPN/LJldb 13 | xekAmwfU60GeaWFwz7hqwVFL23xeTpyniEYEEBECAAYFAkt9ndgACgkQYhWWT1sX 14 | KrI5TACfcBPbsaPA1AUVVXXPv0KeWFYgVaIAoMr3jwd1NYVD6Te3D+yJhGzzCD6P 15 | iEYEEBECAAYFAkt+li8ACgkQTlMAGaGhvAU4FwCfX3H4Ggm/x0yIAvmt4CW8AP9F 16 | 5D8AoKapuwbjsGncT3UdNFiHminAaq1tiEYEEBECAAYFAky6mjsACgkQhfcmMSeh 17 | yJpL+gCggxs4C5o+Oznk7WmFrPQ3lbnfDKIAni4p20aRuwx6QWGH8holjzTSmm5F 18 | iEYEEBECAAYFAlMI0FEACgkQhEMxewZV94DLagCcDG5SR00+00VHzBVE6fDg027e 19 | N2sAnjNLOYbRSBxBnELUDKC7Vjaz/sAMiEwEExECAAwFAkqg7nQFgwll/3cACgkQ 20 | 3nqvbpTAnH+GJACgxPkSbEp+WQCLZTLBP30+5AandyQAniMm5s8k2ccV4I1nr9O0 21 | qYejOJTiiF4EEBEIAAYFAkzBD8YACgkQazeBLFtU1oxDCAD+KUQ7nSRJqZOY0CI6 22 | nAD7tak9K7Jlk0ORJcT3i6ZDyD8A/33aBXzMw0knTTdJ6DufeQYBTMK+CNXM+hkr 23 | HfBggPDXiF4EEBEIAAYFAk4Mhd4ACgkQg6I5C/2iihoNrwEAzOrMMTbCho8OsG/t 24 | DxgnlwY9x/kBIqCfCdKLrZCMk9UA/i+YGBQCHg1MaZzZrfbSeoE7/qyZOYDYzq78 25 | +0E16WLZiF4EEBEKAAYFAlFVUVkACgkQh1gyehCfJZHbYgEAg6q8LKukKxNabqo2 26 | ovHBryFHWOVFogVY+iI605rwHZQA/1hKq3rEa8EHaDyeseFSiciQckDwrib5X5ep 27 | 86ZwYNi8iJwEEAECAAYFAkzUfOUACgkQ47Feim8Q/EJp2gP/dFeyE02Rn3W723u/ 28 | 
7rLss69unufYLR5rEXUsSZ+8xt754PrTI4w02qcGOL05P+bOwbIZRhU9lcNZJetV 29 | YQtL3/sBVAIBoZVe3B+w0MiTWgRXcSdJ89FyfoGyowzdoAO7SuVWwA/I/DP7CRup 30 | vHC5hZpeffr/nmKOFQP135eakWCJARwEEAECAAYFAkyRaqYACgkQY5Cb4ntdZmsm 31 | WggAxgz83X4rA51TyuvIZye78dbgoHZDCsgCZjV3GtLcCImJdaCpmfetYdWOalCT 32 | o9NgI7cSoHiPm9YUcBgMUOLkvGx7WI+j5/5lytENxtZcNEOjPquJg3Y98ywHh0f1 33 | qMgkExVl9oJoHeOgtF0JKqX2PZpnz2caSqIpTMZYV+M+k8cWEYsG8WTgf48IWTAj 34 | TKF8eUmAwtwHKEal1nd8AsMMuZbL/Fwt93EHf3Pl2ySAuIc7uJU4953Q5abaSafU 35 | jzUlIjXvGA9LMEiE1/kdbszuJeiy2r8NNo/zAIX1Yt3RKX/JbeGSmkVVBwf1z07F 36 | JsWMe4zrQ8q/sP5T52RTIQBAg4kBHAQQAQIABgUCToOsZAAKCRD9hPy49bQwR2LN 37 | B/4tEamTJhxWcReIVRS4mIxmVZKhN4WwWVMt0FWPECVxNqdbk9RnU75/PGFJOO0C 38 | ARmbVQlS/dFonEaUx45VX7WjoXvHOxpM4VqOMAoPCt8/1Z29HKILkiu91+4kHpMc 39 | KSC7mXTKgzEA3IFeL2UQ8cU+WU6TqxON8ST0uUlOfVC7Ldzmpv0YmCJJsD7uxLoA 40 | 7vCgTnZPF0AmPEH48zV238VkYbiGN4fdaaNS19qGbVSUG1YsRWV47PgQVfBNASs2 41 | kd8FpF4l5w58ln/fQ4YQk1aQ2SauD553W4uwT4rYPEQdMUJl3zc49AYemL6phy/1 42 | IMMxjHPN2XKeQ6fkOhHTPzs3iQEcBBABAgAGBQJQSx6AAAoJEH+pHtoamZ2Ehb0I 43 | AJzD7va1uonOpQiUuIRmUpoyYQ0EXOa+jlWpO8DQ/RPORPM1IEGIsDZ3kTx6UJ+Z 44 | ha1TAisQJzuLqAeNRaRUo0Tt3elIUgI+oDNKRWGEpc4Z8/Rv4s6zBnPBkDwCEslA 45 | eFj3fnbLSR+9fHF0eD/u1Pj7uPyM23kiwWSnG4KQCyZhHPKRjhmBg1UhEA25fOr8 46 | p9yHuMqTjadMbp3+S8lBI3MZBXOKl2JUPRIZFe6rXqx+SVJjRW6cXMGHhe6QQGIS 47 | zQBeBobqQnSim08sr18jvhleKqegGZVs1YhadZQzmQBNJXNT/YmVX9cyrpktkHAP 48 | GRQ8NyjRSPwkRZAqaBnB71CJARwEEAECAAYFAlEuf78ACgkQdxZ3RMno5CjA8Qf+ 49 | LM8nZhjvJyGdngan05EKqwc5HAppi34pctNpSreJvNxSBXQ4vydVckvdAJNIttGe 50 | WjVDr6Z61w6+h9rMoUwZkKMLU5wii5qJkvwGtPw5JZVe6ecEKJrr/p9tkMjIjTHe 51 | neYrm+zGJAx/F8eCy+CzWwGacLw1w68IHHH6zsJZRhyNlSBc9ZJANRzXRPWc0tzH 52 | fT7HtiN2dQK2OlFLRr+4t9KLFae0MsNRr4M6nBtOX+CBP4OdKTbeASyXnK8Gbpnp 53 | Ejn0b4isr6eoMcJbNwVBX4XnI5RG/Ugur4es9ktOQkUFxy8Zpp8/vk/+hyWHunr1 54 | G2ema2dak8zHIa7G2T8Bb4kBHAQQAQIABgUCUtmKKwAKCRBI64stZr6841y+B/92 55 | de8LDKj4UjfV05o6e0Ln6lIRgxpexbgqyQ7A/odZ9K8B/N9cNNaFZJR4tAAt+E8X 56 | 
ahcyd3qn0rspvI7cdwl4pslO+DIsdoejuL8g7SBDWCjE9sQLEDLxG2hqUkCrc5mh 57 | 6MeAXcrK12LKCq1uMPQzc2P5Prz2C4j0XITBzSGxukxtoC/vj93+h/gGcQUzQIq3 58 | L4QE1q8XF6bqTFpt6i+tJULSZdrFNkcg3zx0BkLAceGCd+BDv++M4BRpWuzkXH/t 59 | FpXq/rehuh3ZSstkvpqZot+q34GMCgGUvsM/U18akYJFYpog25rdYTLTs3eYSqR1 60 | ef6BQ4lhGWDx4ev41YIriQEcBBABAgAGBQJTBnZtAAoJENgv4DzFW8/jPXAH/ROb 61 | XOYzaU0R8ludCEhJcWlx3IibYRCQZUcQUUTdiPHEiEVq2vPruujvL9KmK2c5lvK3 62 | TGuPm804F9MpCBWA6GSM8txmIndPIUuAKoZP/dErMo+A699BbBesTGY0v1pF6eyK 63 | PA5cgh6cOaUXHCCOl5LPiWN664Euwk+IUM8bi3Qx78PopW+E0EJehd3PLkC5XyBI 64 | Ie6YI9ovXe8K0B0DMMWDydgdafTjGCB/nSO/C1qpa7tVwvGLFdh9qhKndb1kbFYB 65 | Hv957ZhXQoLFo9D1IAPEzXEr3q9FsNgaVvJNlJj73pjesO6DNfBEXHHr6IbGl/Ir 66 | mH+Wgo7Zm4RIYW8DfTiJARwEEwECAAYFAkzhRMsACgkQTsYHIylgbnfbuggAwM65 67 | VhsyIv1qfHT6xG4QRBltjWi0KhMIh/ysMQEDDREE9i5c59wyQdY0/N+iiFbqoCN4 68 | QrzfUBI9WDdy1rkK2af+YzZ6E7dj5cIS16dNkk/xm0eDelkS3g+1Bo4G2tbGpfWH 69 | rfcoQhrRrt0BJpTgo5mD9LIqgKFxKvalj6O3MNpyxnyr9637PPaCS129wNKQm6uQ 70 | +OU5HH0JxYWE53s8U/hlafQDQCS58ylsteGVUkKZLKTLIbQOifcL2LuwbTjnfTco 71 | 3LoID6WO9yb8QF54xa8sx2OvnVeaQYWNoCzgvLDQJ8qP241l2uI61JW0faRwyY1K 72 | 9xSWfYEVlMGjY15EoYkBPAQTAQIAJgIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheA 73 | BQJQPjNuBQkNIhUAAAoJEO6MvJ6Ibd2JGbAH/2fjtebQ7xsC8zUTnjIk8jmeH8kN 74 | Zcp1KTkt31CZd6jN9KFj5dbSuaXQGYMJXi9AqPHdux79eM6QjsMCN4bYJe3bA/CE 75 | ueuL9bBxsfl9any8yJ8BcSJVcc61W4VDXi0iogSeqsHGagCHqXkti7/pd5RCzr42 76 | x0OG8eQ6qFWZ9LlKpLIdz5MjfQ7uJWdlhok5taSFg8WPJCSIMaQxRC93uYv3CEMu 77 | sLH3hNjcNk9KqMZ/rFkr8AVIo7X6tCuNcOI6RLJ5o4mUNJflU8HKBpRf6ELhJAFf 78 | hV0Ai8Numtmj1F4s7bZTyDSfCYjc5evI/BWjJ6pGhQMyX32zPA9VDmVXZp2JATwE 79 | EwECACYFAkqg7GsCGwMFCQlmAYAGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRDu 80 | jLyeiG3diZVQCACkzLIVoHG+J7X+MAyhDe5kuOes/9JYr1K/1MiKInGKDg8tI2f5 81 | 2W7URmJwWhlobH7AsyE464KgsoBV+jdFIXZaC8F4+TGo3TeEgs8BW5PaS6z7t/vL 82 | 3bqWRHLaYX55ZV3kJkOgcREAEW2BfRl/bhMHP3QLNrW277U5aJiAPvOLbgAiUXBx 83 | ThSn356NlYZIbgnfI5mPnGQQDGDlEMp+RuxFVX5meprFwOp74am7gEUSI2/Wv87J 84 | 
kgDwVtYSOfSudcMDjKc6yjCf2lpATQ2swgeuHp9H34JTJ4XOf7EWE9o2GjuTwYDM 85 | LTZZy1MZfmVlEFou3swMLWLp5qNVwLV5eC09iQFcBBABAgAGBQJTe7BxAAoJEIpy 86 | +RP1DGTa65MJ/jFMCfu1A9qrnC7owiOLOqtS5ieH/UHhsVbzkdSsnjkHGotndrWa 87 | TkqXaGJXpsugJcf6w6sKsOZ2xgG6F62H2XMNNxOWNMRlH2/m9jKMOXGS5G8KJnb8 88 | ouaNf2bmyDo1Q86B5DFzdHQlDoP4iB6+SBOuA3UX54QeNL6uHiewnO5FSJS9mc4g 89 | fVVz3Yh6N45Zp+DusBj8C8JhYkvbRhsnZzENdrvAZz1IGfyH7s2+nT5DhD3kF1ba 90 | 7HZpF+YEQbYm4Lgo9izBkkj0xYqXN5ARbpp6BenKdO1WJ4bwjFr4hWKqpESTaJpe 91 | 1+jFW6+KUFRy7nD2A0IkX58cdmNSivmoe9nP3X/pPQ0GUKpmtyPCU42xF5KHVK17 92 | PxIsRmTU3JiYbnqe8JphJvUW86/6cIGPPklqjIRF+C/gucj4DWFteqfdiQGcBBAB 93 | AgAGBQJTf7NxAAoJEE+jjoIuT6SPUxYL/jhcveiK6F9n+W/5UYVfEtEjtuQ3etmu 94 | fUR37ow41TmrP2lP1oXgLkf8iACYgdbENoDfTNJcAQAZdjfO1dg5ybj+mejH9avq 95 | xcbzesbkz8vh+oPQ8y/uYJtW3puwg1fkVOsJVRhTsSux2kKAxHInyApGH+0aw61v 96 | Ixn/er0T6HKr9wa7wOOF8SO1BkOE/UamrhzJmtNF6+X5Db7mO4DhsLJSXTQNAsd0 97 | K4QSOvKrhdaXRDYIQ0OGRVX5aS9+4g4hWBbXWPv4yYxcvMwzKTgxm3OB50lbxpTb 98 | +rSY+ffXffnsT0PAxmP1e8HckLHh1GMFHrvPBjJJW+crwoxZU09ZyV5Wtj6iPWX2 99 | myr53imSO0Br9x3WYF+HuHq4rOR6SP/Na4lXT4cRspGaYGlzxJZzhBE1SsIGTu9V 100 | 8awEsKaIIr1GKPXOCvCKGvD9PvsFy85zGFk13dj1Wz29q8TEXT/kipyXBy4mpA7l 101 | 9EqYwIWLWvBPllxYA2Yld1o3wf/sPVY1e4kBnAQQAQoABgUCU3gBqAAKCRBEKQe1 102 | iN/EV9pJDACOZEkc0GdNhDZKW7L0rOaC2JWXGXoxrGvbMFC9e/j+YfZKXYF4VZoz 103 | ySLJDOXpdlXn+rxMKSayRbQ3xg6X6XoCEUa6Hg9LxRZ7TnR9zEkIHpnpdQV29VDU 104 | XV0jds1yabmWJtwxI0U1LulAXWprPh+ZWeLA/HV7MF0C93ifiXh2KmDZtb3eI8yL 105 | LUkNFzVSbpT+i5bYqcPuXn8+RApYvNLH3rG9+KYMPoNdtb7/ucWGV7t+hMtKUU5z 106 | J65cI5AJrThdA4kYpm2JWnUDHcy/y3UDS3YTNiVNMpkIIIJr0QNF4jWbsQlnv5Im 107 | 55pbq7r9O4zzzuXgUW4z7oGjDQgQLwUnI91mD8Gqdne0rjFKbe0XqjFzwiRpAmKn 108 | 1HvqE9JZF/NzuihNvFCvNhXNNlr+lvpF8vltOLdD0PXEJGM+2Rkhx4xZ8+2fKaVM 109 | gPB15/NNPTDGPW9NKW6oyKHQs4NEOubKwNLH0wF8f7OGKDB6igIKPTAuY6SZPQ+O 110 | Am8v1MEbkdWJAZwEEAEKAAYFAlN42xAACgkQ8u8vRwaei4/l7Av7BU6JySYZ34Oi 111 | 63jhgCY9dLN0/Up6u9pEBYhAsV1rtcVvaSvAbAimTDuFH2qEAb8JZsX9ZFCpYH0z 112 | 
apARoc/steM+4JBV/3DlQlfMpQF+3/89EXojME6byD7IdjUwmjfHc8N3YHKbaGev 113 | iJXi15WegeK60Bwo+d2m7qJ1pOSGC424o5zWFeoguLfH1ZFWLV3ZQvwxqTJqZXPa 114 | TaxGS0pARniCgRir7yQV5P9PQMf9sHk3c8QrCkr+/TdPh8PyKEwVjBuU3tj5HHSa 115 | ti2EIhBil8BUDp5ePIAG/iRGP+XUbWKvxqvU+eseI2nYe/VqKkO98Nmc2mJzDJI6 116 | t3JCCmMZqjvkinDVE1wK5L0NZRQ85hhBB+ScJ5QyC1AOqgkFiITfCnbhBnDc+yzP 117 | IyTzptBaoa6F2uYD3vPBf1/WZ7YF2HDIs+QElW3/q3ZOn6/HTRWaFtAotuk8gALO 118 | oAbDxG1THUbluZKNGAC6jrFY4La701zG3fe7/NDtS8L/BDYXAccPiQGcBBABCgAG 119 | BQJTgMDpAAoJECrT7UPn2xWPmkMMAL4yd46a6fXMCrtZJJmZ/4AjcHNgahg3ruUn 120 | xBdNyAOBupcC0ZC+nvUbYi326B08R7JDhPcPE0hgDa4LfjiF2xUICdAc7o0y9HkD 121 | WoFSV3GnXDsZ+BXf2nUA3PEM3AaPDB9RB+qT1UQ6nj6m663ijZ+HIyaQL1FobNZX 122 | m1o/JYvU334mp81+yNYULmp5L0k4tO+GaTaUtUqfDu4qW6JBXOvvKP+CpyzIDaw9 123 | +z3fxwiv3oK95N13d3UhZxESsHvZ7fNvQ8oq+u1eN8AyhkbS+uwDhKjQPZndjvXR 124 | saAtDVZgOVHDtgzv9co3ZZYgFbEs18SNUi5WekcsJM12ae0unmhoGwL1OsPBQwNn 125 | kMSujVceKNfeHvHk2L5dTz1mh1J5DL268vCSCOJkjEPgN8JdA1zsjZruH4zeu+7c 126 | t4EhB7e/8o0wOmAvbQDIvFSzv96p7TNabcXix9i7vLAD+aQfUDPfFNaP8jvpaWYo 127 | 9J4mk0SL9+iXQ8KxMne9Z2m7UeS3gokBnAQRAQoABgUCTqmiPwAKCRCg8hPxRutY 128 | H4lKC/9YYwjHjABrogdB2sb49JIiM2Dqe+G++GizVTZsmV26PJXWQLKr2zKZDMLk 129 | 3l/b9YLVkuFeG2K035HPFCtpWIlxkxpbarI5i9F0NjMmgaIyqvh14xNhDS6NHgio 130 | DdNKvdNI5LYtWXGREjYJVCBIwdxWZHi5JsQgV2E0vfIZGDKWFfMIF2xrt6x0uvhW 131 | ZnD94ecU0Dd8sFz7TKJoCdzfdYpoj5ROenLGJ7OcDMULknSA4NEVIEY0BVyQCb3T 132 | CjfboCRxRdXs+6yz4YEqTCzPNvQqIKKO6MA/X3ytmUokRZIVmU8es4iZxYUXrHKe 133 | MzrvYVpbwwHwpziGwBr+SOkrS5iv5c1V1Nb+pSajtzAm4tQnNoyjvB2YsEOvTLUN 134 | gaScY5O7Xu/FGhI6E9Y8KbD7nb2t9XdtEFgHiq1ST15tiew6YNCatVA/GW3r97ed 135 | iBjqAX35hqFSZ05yaNDlCgfKxrRiv2SHu+hutAX7cVLTAetm2mrJBb0ip7hQKrmU 136 | OpziT7iJAfAEEAECAAYFAlKfzT0ACgkQ/bW4wGfyU4fk7A6fayMhAuOjAsP5s7Ge 137 | bYVzRI8Aj5Qmp4w7DyJRYpwTzyIVPXzLTpOmpQRp4sChlIA9YM/Ho8jhacvpBKDP 138 | uJr3p2DhVTUVL+BRRWoTFJyrlbC20ftr3nCOMEW4yHA2u8bKvHwPIUzasqqPtybJ 139 | 2wdjXx7V5W6TpwWnpJFHl6TyqFEsb0b/Ne61Tx7mB8m/0UUjKyu43O0k5p49dFA7 140 | 
FUUlmaZmjGrfdxSN3HbwRXbaOmWYn4q7TRL56BmLWZklxwXCY1nwEXdkC/R0U0s6 141 | NNU4o07hahbc202SzLX9PaHCEAREVlTz2nVdIXcPUdo3hOIJhE/2mbfKTqB8WRgE 142 | 5jfXzdogJBhP7D4pV2DyvE+SKvIXQ1Xp/2SN9hLWwBg+pQwjMpiFX+HVRw+6p7Qo 143 | rR/k2kryhtc7aUnMtkTuCq1tzzwbdGD7e8O6QPhuhId06GbqKLplqYPap2sVAONE 144 | 6NHLzmWaY0nFdzXiICXSk0oTUS9NwmAn0WdCeC1pJi6T5iyopxDNMyIFFTBTDFjx 145 | WbeMo6HRKsbjnhEEayV4bwJ8IaPjhvEUTpDgyV28kCSRgJ8zvNLDD+nms6k39K7c 146 | 0xjiBgIek47zMp6bgTPAn0Q23hwCMf+FiQHwBBABAgAGBQJS0swMAAoJEKQiudjl 147 | J9vbtnQOn04QseTRPp6toW3qTzPs2vFToGrZWuhRDFxEUEuR1GGM3UFWvk/a7Una 148 | HsaXLqZqqKIdqWlCb1EwddFJKiZU+Fq/sRm86VAeK6OQkNwMtbIugW2WC9MPre8D 149 | 9gVudx5ZjYBNjqCnX+yn+33M7/LAa6Tr7GVUqV3aM0ltCmQHABRp1acQWkWLG3IQ 150 | iA5Ty64hXrCPr/dXLCyFsbUyXccvgTiqlKo5OCh6xC8vLI2OUjckvwoH5yWM3EnE 151 | E4TmypGAHk+EP2aVkNflYWMvcRbBAeLVKk8+a6+JyJJnLRKHDTKN6++kyceeTN4f 152 | b1Bv2AN+S+WZLkeTatibeq+78jn3ES2Yl9Jdik7KF7cSx9+Y7EcSoua1DXZzHVO4 153 | rPSBcWeH4yb+3ET6xUeyK4+iZqd/067qTxED6ZDf7vXk/8+GiobRC7ob4Y0IigH7 154 | bWWfxiv6DBuwpcRipVAhMReoOR42UIfL1IWOk9d/lcmHjmTiYvG6XRMcDAu3VHjU 155 | KE/jb/6vcq5hZ9dcBSzPQJ/mR9AtiqnA3Y6RfK1UrbpQ3rJUu4UF61NTi4la0kFA 156 | ETcfJS2rTRgBJ+tbL0hPPVC/81ZzjF2mgnvz0CfVxXpQ7un2iLnRKKd7q4kB8AQQ 157 | AQoABgUCUoYE7QAKCRA3hSP8C59JlsU5Dp9scQR6vHTN/oW0sbb8TIrTSr3nJBjf 158 | rdZ8zp9e8RsQd7xFOE+hmzDEsCf3rJ9M1ZcuENLria4Oe2w8+l6eTzqyI3On0Gvy 159 | o3V8qRlja0GPJS28bP2L6lpBajBc98V5Xla62OpFjdyfrFjhJdY1bJ9+vddNiq5u 160 | Wdaziz7Im4dB5vdeJZfijyn1yyZO4LmwmCZmmS+mlGDNGkcB3xULPML9wwRV5Xb5 161 | xsWifiwfDk0lUBdtpGhn8/hKgDpLZPPRIHUVY1rW/1MS7nv9/uXxdh0jqcvjLqKa 162 | cn9FmqCkbuAlSolLBL+K7zQkmqdknTfSFatFkcenaT5lav+RmAoYLEdtEJxA2kkK 163 | Y+ZK4lxxUAFYiSgqOBVHiNcNuH6xoZgufZ5RTPGRmZ3vFoNI1QGE/ap20qUYVzwR 164 | IvSpmTCLEuGamBSAWx/sP9sYYnJWSE6OLiskNnH0jQrmzQmsOsSo7SfNih3Tyztp 165 | o5xCuAxhxADvH9ncGVK5FGjoorrV047jZLNJsVIRblnwgR9KhRgejI3zWRt8L6P+ 166 | rV2nCD4GHD09OpJZ8uJcjofXlfsf8iDo0MrRT2/2z6CqaobQC1CEScISoo1kVXUr 167 | yudj4H6U1cwDQ8iDEDSJAfAEEAEKAAYFAlKNSOMACgkQxAlKf6mrlIEpXg6fQbNv 168 | 
evZIDHgIz3MCEahDDzXIzfhZQeuQMRKPZOsrDSwtgszMzfCuITMVP7zxu6jBIFPy 169 | Hc400OKNuPw12NhAJ9j3RuqABE5n5b392ZXYL10nGa/mRYlD/kPaSy5Wm7UtTIro 170 | BZcSRXUqkhxEcI/4pvwRZowSM89Bs0nUqsXMAV8hR4VNsEvmQyYkrlphVSIdactm 171 | rRIrk6VYaoGKG5Esjs4igE6OVyhe8WgSBKSBQqlqkToL1WQStbrRYIr3WwyFprwJ 172 | j2lUM9pqcwbajvcF9LeMY/KWbgfb7yOUnX6XmJ62+jKKo5JPWIh86xuLfSbG5Ung 173 | kG8q2ZqmqkHBPp428KI8qwzKMHmhYjkhVsrwwMhphAz1Mv6fIwaGXX0bNxQw/y1l 174 | GUKJ8z1v/OYwy3WyBVjSSgXwBl+Hcej0ovJeo0xS1cAVJCsBxNs3lKbOOUm2X6HP 175 | q6GIcf2QB5JfgwVFdfokN9nHIDDvYmomRlo8QApAKmh2Q3A0b1vZ150gGxNoP0ig 176 | SSvIO1HXupXZ4/CUJ7Ka7A2cZVVonyEtsjtkBNshpKftc28wYzyFfQ3+tqbmdO+t 177 | HaVobWjWyzmjG2Ds99OuEzIJOvNNzFzxWyQCC1smOjKLiQIbBBABAgAGBQJRVI1U 178 | AAoJEH59M1xaLV7BfjEP93RHY4cUujjYok+whz1bPcAw7uZ+lsKZJdzGgz4LJ338 179 | ZPaqAC32GAi2/eKqD12P4BuM81Qp+kYbhDgP/H+1vTXwLcXhTU8sL89DaVhfRyy+ 180 | 6/canky42kK4dLltbiQvI6lFmHUmuqeIvwrUynxx0GEx/of5lyRC5dKEYfA6EbIE 181 | y7dftuJeKRK92BCZsNAy1oNvG5hn4kZcXS5pw9WeUBTBbUXnkgiGZQchXu36mhij 182 | /wzQcn+UH3Llq5qaslPhpyw9ZL59gcCN8Z+lITtPtBa6IgonYSalVrmGkaHMgj1q 183 | EXTC0dMqQXt3tFhXWnVRQDme7xAVOil7x/IREkNmDzwgK35Olcnv48n1MdxeA0j6 184 | lqkrLbAoc9+XLoH7TvUEDifRRfS1XvOpaT1rGg36fJQv3v+TnvaQtNvFqGsERhem 185 | F016qpcdr+51alNhorBIZiYiiFNIioFmHtdr+uEuzHZR+BHkHRZbfgTxxPO6JnOJ 186 | hqUgL/3r3i+M609pJOmI/IrYGf+A4XrNcSq1Rg9N1V9gAPKEPTS40DECyaCaO74q 187 | ShiXqPigbdrvUtlZx1D6HHef5OCk3HnWFeWp2/yDUeaQYzkolbfVmDF8+aqA7ol9 188 | SJa5xFoYc2MDYB2KdT6kWr2uUhodZrEb7HD60U211MPGabNqPakKxhTP+pMA3fWJ 189 | AhwEEAECAAYFAktpE+EACgkQxel8K2OfamZhpg/+P9NPk88rqRnEuDVDHodlkA5h 190 | G0d0Yi5vkV9rw07yjYut474aUd3FjJFqNEoiW+6dFbNy6YqqYPhrXLtnfJl5LAUJ 191 | UzMA2aSLtbuX+cq18DCv5ZmU4DW6kZOWi5vX7QkQCTTLP03VlcD3Gu6HyofseBMg 192 | E4zoEXdmZSZmPnOygakFLzC9w+D1XfK2gcaTKjAJJdW80aY56eUezFDKLhOw+YzI 193 | K1/ZeeOTS4LeITtTq5J6/hnwHrJdjApX80v2WJzVVoy7lQbxAPslJHZdYVFCBy2T 194 | yk7kYdddVxYCcdYr0e8A+GfG/tQJGxvZ3O4nOrezSv0XmlhLZ5rjCn8M6fg/NKUX 195 | sPtXiac+DQJbr5RwQ5Sc7bnPVsCywqetOeA+xv3L2wi94rg4u97QiwqhDW0SE9zZ 196 | 
uQL5vaXl/GFpaRXs+mVGATS9h+0lDBQPi21oPkdN/BKKzr//2GCl5VFb+rkOY65H 197 | thCuiIrT8jFGArJIF4nXku/4BPpNrganC89iTsd5+UUNFIlta+WYkENQ9tC2mwj9 198 | 6BaK0KyRQZP9AAzTo5wG8aouczptpwSH0aECJNy8kd/UR8IAkZkxjY4+zyfQDlb4 199 | aNDsVGvempgjFcNo0rciKrPQl5GyRLQj2azuv46gaGcYzqsobejS/2jqJLMnkTeE 200 | xaCryrWuXo/raWBWQLOJAhwEEAECAAYFAkybgq4ACgkQ2HRyfjOaf6huKQ//Yfey 201 | 5BJXqZqIt9i6tyw2VqzMtZ1gAqFdEKeuSmz30xty9g6KknIjpeZo+POb3rQFUKGZ 202 | /q4AjWKdD9C5WUvLcXd0RCWeDG7dmD78h35OWwqhc+8FXO1vU0nGyFdEx89cNiO4 203 | 2M/z+eYeoysgVL3ixbCjJlrN4MHrilqshxH5MvG7JfIfoPwucQytNcwSa8T9kTlm 204 | C9uSl1rwEllKlDNabxMpsf+9T0kZtI+KQrvMBg8A4RRJhpP13Bt6y949FbR4zva7 205 | kqV24h+5c/bKsgY4PXXM+AnIuXy+Dq1aRVgRLhWypJqc73UnpD/MDDOPKX8nkF3F 206 | 0mjcfEso6KtvNsniPCr5GKcnvoGu38qlQ7ILm2Pv0tjBHNIYQNG9xPn2TMH74D6f 207 | 88NahHj33Ha7PG8Jn/dZMuKg7qEeHit7+lJDn18cTT8xIMMUpl9ApmjLuWwo5eTX 208 | ysai7PQQU/ezEbOgYqznBKEFK+CXH6KINnGH13d/r9L71AZj/KZsI+c7E0imLwUS 209 | tvJEZr2M9nR+ybA4SN6/kwcF5n2kx+lBJjqBn72hb0wyaXXtTYFGderuYIGsxEx8 210 | imbIBDtX6rWOMIrZAHlPBS5NTj4Hye14XcChR/AodmXrgJD/z+8+sDGGZpHAc291 211 | wknHO++j22vF47Q2VSt8T+WM6Tx8vq0+Wsnui/iJAhwEEAECAAYFAk0YnfAACgkQ 212 | g7W4Fhob1Q4xdw//SQDVJF1h3bg5F1F2RXKEjxCoETj36x5XeISUqyTp7nhq5pAO 213 | GlIVd8IttJja2YSfEPYBYNXBQMe0E+4pdESvzK0HjgwpWbkCvszWj+d6v5Emwx1h 214 | HVvEvEXiPky4+BMtpkoSzPGYjkL56jzC9/mF4XC0irbcfE77sO1g7ZcpYuP+TtDc 215 | rvrRi8u07vZrCp8xev4zFfnv8vqrJZ2Iw182JPW4a/0GmLFEYHrFsM3Xy6elUqTe 216 | ws9wTehzjhR2RK7FWuoddUzNQjGlAao5/4piT2zlnQqKOWUrT26hkjdbIo5HYjmI 217 | YkO4EHNu4wY/bcavcgxFOZU0ARaaOnGAsLaPW3zShxoCSC56NMApOONcfUxYkegR 218 | KwFcp1uTbG4C+e+pCpnvB5l8sIFLhehIZ2De154Zr9lA4cOSesu6upprZqEUbFwb 219 | iU6t/Q4FlYxoj0qPceetxOGYQ7AijMvhIOZS+9DHIrmze5mOmEUDLFGFDHV8IqGf 220 | ID4PyhW2dNn27G1Ray0gorlGU4LFYkfehu8QaYaL7oT7OAOwr6K+ozyAWOPdeYyk 221 | 7p2Hf/JnoHxvyRqePhxiUIeGl4nu/Xob4W8aH9owGUKilcC+hOmvFCBdrXYyBjNg 222 | ZHK6+I5X49aj5FNM9HJiYn1EsAMpJ0fADxVS8en6fFm5wRLUz0/FWr3aGFaJAhwE 223 | EAECAAYFAlB7MXIACgkQU5xDa4LkC/fbpRAAsHOvH3Xi6z+VP6ESd/PlpEMTtPA6 224 | 
Gu3bKY9XMu+t5Fxpc6du8b15XPcVjBJC2XhRums/rCN2L1ui6tT2WM0ES7I4DCgz 225 | QzAzAecUd4OuZUVuCindKQrOfwkcbNoQz5OrpYodhNdjf8qsIF07LWxA815mgXv+ 226 | urhnbQJytBop13RPucATLtMtAhcScoJApraP4TnmLnzh0iyHFVa827Cx95nrj1/Y 227 | VMYzeESDnbsFnh4tCFlAseSMhj7TDQQH1/gCFWJl+61qRB/m6pX2hGWCYeZCw3m8 228 | wqvILUbXkc70c9Iwl/2a+0mbtT7JI0TfnjC3ZDYLBfU10MtrxRTOWkaBHpx3g+YD 229 | JWvKQRZ22T/gAOJz627ilMlXH3ayyCIEBCiL8YynrUo9zFdT07h+WDQcNiN6sa4J 230 | q7/mJQpZosv1UF7dh3OehAELPCq5OzdNPW2hceOK6MYWjlquXl6U+/h419T9LRh/ 231 | dqC5hvCPa9WsNuncnFiMmi7GSXFDYniM5cPVx6GNn7EVF92fJQNXaj0XO9YJzc3T 232 | i9qvtbHz7Sa2iTQ3TFOQm6c3yuTG9VS6HfbMmUWW9lfi7rVljjAjeE/PTvYAUF2q 233 | /4HrjkeuTTgdu73eJnlmBwgI1mmnJ3rFl4G+poRldL5m/3YcJvEgFL0/vMHexeZB 234 | PnuO6xVJR1C18nCJAhwEEAECAAYFAlB7MXIACgkQU5xDa4LkC/fbpRAAsHOvH3Xi 235 | 6z+VP6ESd/PlpEMTtPA6Gu3bKY9XMu+t5Fxpc6du8b15XPcVjBJC2XhRums/rCN2 236 | L1ui6tT2WM0ES7I4DCgzQzAzAecUd4OuZUVuCindKQrOfwkcbNoQz5OrpYodhNdj 237 | f8qsIF07LWxA815mgXv+urhnbQJytBop13RPucATLtMtAhcScoJApraP4TnmLnzh 238 | 0iyHFVa827Cx95nrj1/YVMYzeESDnbsFnh4tCFlAseSMhj7TDQQH1/gCFWJl+61q 239 | RB/m6pX2hGWCYeZCw3m8wqvILUbXkc70c9Iwl/2a+0mbtT7JI0TfnjC3ZDYLBfU1 240 | 0MtrxRTOWkaBHpx3g+YDJWvKQRZ22T/gAOJz627ilMlXH3ayyCIEBCiL8YynrUo9 241 | zFdT07h+WDQcNiN6sa4Jq7/mJQpZosv1UF7d//////////////////////////// 242 | //////////////////////////////////////////////////////////////// 243 | //////////////////////////////////////////////////////////////// 244 | //////////////////////////////////////////////////////////////// 245 | //////////////////////////////////+JAhwEEAECAAYFAlFwaUEACgkQuW8j 246 | AK0Ry+6hbA/9F4vOEUpaVz8Xfky83I7W6zP6q+z5KuUC3Bo1y/cN32KHSbD5sf5T 247 | 49VWBeWTWDQ1j2E01EvG3aZRz6aD22036FrRGSpRixiODVaP1sO5HRr7cOG25L2G 248 | ESNasEFPdRtNxZPmXEqRSDLhKP4OHQ3vyykejaitQ3epHDdWQdjiFZzEC+Vet64S 249 | /onsiTi5n7wwyAkWV3ihWEyadY6szC3XQPnxRG9mwbKkj50rSnYlK57nJ8FaRIyk 250 | WwnzPZwI4EwA9Cr1LreaFdmI5GvElLinMnzvBxgb8fdHWmoXoj/j21YGXJnCLA5i 251 | zR8UQlz2J0X+FLN6f+oMNCm/LiA0XjyJ/tIYS04G7W8QapYq82xq1o1yNiCPgFmE 252 | 
OUweADFafQbtU1FWzHUPqfM7MQCUVr89CBB8JdJwldFXva4qgyTa646TzJvb62g5 253 | bkuhL/RRuydSLvtW+Selk9tA9Dmy+wYEJTUFCkN4WxCTggxbkWW/VVVIS3Gd8uLz 254 | 0Cfgc8w33b6o1QNEzcT94uQWes92IHfIySMCANtqjkaH4JOzNDXXzGSGZEL6k0Ij 255 | vkBL9fhXVatwAlSeJNsWAFFmvi3a1Z8syDHjhcy1WsqtpE6PtAfisQ4NIicEERZP 256 | aGIl5abhBLWDlF+jKbi6vTbHGqHBcCwJcUYUpmKtcOhMwcN2zp0jRBeJAhwEEAEC 257 | AAYFAlH4PMEACgkQIizoxDZEAoiNThAAiLbgqUUfWEvR3jkaxCzAUndiKiQXraOl 258 | dFTbHv/4P5wlSoDiiaNoaRKfQaj4nYwbz9tBJP8SKh6/z8+0ek9VFbI6rUAJS54M 259 | Wm/NcCbEQEDbk6R0NFaLLbLYJRLC7af84kGDr4LdMhHKirOzZiJcgOv1MOLs1xdQ 260 | /YZ37ENbISHnN3fGKUBmX9I2RbveB6fYacbRZEyPO8n1JKphUUbDYvShpLJf4cje 261 | 6UDJta5hoe3BERyBwomlErtUx4gIRFHOYZDFpTNsG2xZ/WWFVRoKMg1soQ45+6oS 262 | pxEaxeXcAnD+phRqPcITuFOJSlDSRwPDsbQ2+5g4WoGNMBlU6mvjm9s6oKoqni6k 263 | 2HYouwZwarMHL5G5uwSxs6Yi4MibKMUukYc5m/niYk0tD5INtstyP3oQ9nn6CDuK 264 | nv57qAaoaEkQ5AtZ5d6NOTasBfnOCQe62aOpjupkh7reJeeWrzwHrq1ly3Z1jC/c 265 | z23ZfYyTHdRHVTFBnzAmOZJhJLj0cN8hmHk0jPnxAfwB4+W0GTfbIzFIWrSWd0Yo 266 | TKETaiYzI1jNK3QRxdyxpqKqumFD2I8nx8Jjb0w6QpFDP1Lc/805kVCuovfNq98l 267 | fb6enHS6Y/3vE8ijAw+fRBkRaRN/au9wbezyzmN9Q06yL5lw9rOGPCpXq1fY69t8 268 | rj0Ibe9N1wCJAhwEEAECAAYFAlMKFV0ACgkQDtqpReik/5eefxAAhNSjgqRqmhyD 269 | XC6uCliN60bLP2kjo/C1XbtD3FZaMtZGdmGWweFKjUmG81j1AEgpvZBBT5YagFG1 270 | Y8jGbQC1uGYYGUqxxFExh8oHRTakUTocYOHu1Y/NLM9YcpveihvIHZuaC0E2xYYH 271 | GByhARILXy5mdgnZ9+To3kGFTsmc3k8PpN4oWiR6eCv4FWTHAHSxU/llqFzuVc2e 272 | PuYNN62GzRnLENPOiD0CciYqUlHtC7+6GGHPaZiehtL22DO/KpG2JubF3tEvCUY7 273 | B/r+NQDfCZiC83UHXut4HUbDKUr15Eh6bU48bS3ZRX+LqKT0EUyvCu6h5MN3U86c 274 | Dv5IMtcLa1RsbAErg8QpsP8gx7jd0pDKloBhdCBarCXct/e314IB9bRf89FR1xLE 275 | DxZaljqkWpPoSDUaJxKfBquG9JZyz0jcU2jt+gFy7dLr7A7qMrr+LM47KbOe7Mam 276 | 61CdUJm5UgvcUX5AL3mmilNSOdMFEXo7Wnajk58Q4AnZ/vpzdr+1m48HlnRZ+K+5 277 | BfMMOwQLs7tJGRgy8IjxWz8fp+KOw6cpi1K8dFubnZHTMVYc8UIrfr+0VqhRWtw8 278 | 1sbIVhPaSUvXvsb57qYm5vPyfM8wuVbM/m61RhjyAzhQhoav+Y+50qZPaMWn/bmg 279 | UqnFqIGtE6xdW/341DTVBi3FPkG57KiJAhwEEAECAAYFAlOASmoACgkQ8S2YV0Pu 280 | 
JiCn8A/+MxtOvhvpWrKttqr9IEB83ZKlyao5aZCfm2IFG8b8WBXry/nl26AoyJoH 281 | U6nDfMaAXeXzHN2MV24wUfWJkGC/uRqiiC5nu21XziDrEZUT/1eM66CFfgpyEhGu 282 | lLxCptrtKvjq9is2YaE/pgyH0KY4bnJ2DQfozBeaIL2iwpj8bvU7I/z8rTvOIT6I 283 | iZL6g6wKuefCTEEQMuTCjd6eBskygI0sr6qlK5q3DJ20jobLstpk4gGMS5/wjeYP 284 | jCQpTVITp/rDzekQzMOrrELZjqEsLPWSihqYkDTxYnTW7WekiuN7zvfJjpDDRZ8T 285 | kmynRZHiB2FtJwvhLibZD1ktb/aNYVvDPq00cB+jf/+dZmfYgjoYMl+B9ft1T1bq 286 | mJ36nnZsAC6CGHqEONsf95UX0FpaiaYhW2OMcCSkdYnxXq84qmiMb6XKuvOLdh5P 287 | DjizXgGx/FHH/xNxIrMz7WNc2l5PbukgeiGO+VFV0m2vDkxhQ6pMalW0foaGglKb 288 | rawzX3oJsHtGSkQfROiM56fNCGxX575bgVWtvZARBCpYHdOSYYumJG33wU+W1k7t 289 | 8cmYxJqp75Y7FPGtyI/QWjJknFhAz426NBROUzGYB1LDkAcDCFUgea2t5MRkoemX 290 | H3ziFJmKIrN2xNSvv8Byd0Nx1j0o3b+ndl/XXLqc+Viy/hOgNdOJAhwEEAEKAAYF 291 | AlOATAQACgkQFiDcWsagfZy6tBAAooBkNRs8VIDJEwWWvdMUBiPM+AfPj611KDAU 292 | ZpeMdvVvRn0wkoGSqOiJM6jrKxM7MV45W6qXm0W/mVRHWhwcke0hfuQBG8uuVoAw 293 | BU+wb5/0W3uM2DYHQTe6iRkdQ4Jd29fw7ko0wWOJPxqthuLDWG6AAmtDUowoi/cS 294 | xPll/oCfBchQ3x4jiFSNmsmqbHWOOEotqbdIu4XxgykAgAXAVv8661ChFRTsJDNV 295 | vr/UHkUIhh1JYQe/m1Pp2C4XJQLamPEh/s7HPiqe0+TgcMm2viw4GPjpw1Sxbteu 296 | vbfCm9psJBZhwZBziArXZsH27KV4ijnZSgOqOot11uEMO/boa7IlW7WaUlHqhboR 297 | i1Etktvxyn72n729a0tungB7Jfc0a1jmLJmQZkKPtqSzUO/Gk11x1Ta7dCnggFnK 298 | d5W9VhqP5QzL+OtVfq4pi8pQRbrEexHqD0MgZQUrx17Fh2REdDna55Ptb1wDpXyP 299 | MwTwITxgK1MnHD8hAzWYdtweSvHWI1fPfc5/aoGi6ox/a8NfIC93+KXNxr53A7Kh 300 | Ia6ba2ekPYycJE6t+jh4J4A5vhtnjqERrLOvpE22a1SCetOZXSIjEOTFLMZd+NZa 301 | 3XUfZb0fluBo8bx0SopzEUtg86cIsl++ZifiGHLvYwqp0j/j8OUcdrXeAG4v7Zyb 302 | WlMuCW2JAhwEEAEKAAYFAlOAt7QACgkQt2GIO0QovcAROg//eRXbh0nJgJyrNaDY 303 | 1YsdQAKR5cIY4uYpKhVVXegXO7U6dn0pJ2GnBTQz5vX30YBQzhDijpwh5Oeot3+m 304 | V9Z00OYS2rDxPuHIYlYAeR/SfhzWc2ATdAVbiuLzM95Y9onf6cOazsakhthwuMss 305 | 0LNIBy2jrym9oqufoRDlQ65v3uDnTRWNaVxLup9daWAxwq8tUtQ0cGBVLFPKCFAO 306 | RDCFGXUhkH4tIzWnWoeBZhl14y7sNfMrabmG2xweNy/4GVXFFcI8V/rOHtkgqdpm 307 | hYqJYCOnbNfL1We1gnsOls6Zi6Qfs9bKz1vbpqAxRr6IfKUE+XKKMLZ28QS7n6bK 308 | 
o8FmTrRP46e2tOng89elfnRODKVtpszSCXGgckG8+2DI5blAzhQx2fVYBfeiFb73 309 | LnlbIxZY9daetCX2xnv1DIouag8kTy7ulw5cGkEnk28uCFH/BFvTgM9eMutG+oTl 310 | X6Y+khgUWsZcP3BeGNayAwhULqjEYfnkshLZmAu2jUwnsd8wQ9YbrOTH80gayzVf 311 | Ft3tk4y9QWJ5jaXvNZZUOzVoADfJ1zr+0DZ6SO9tFaM6QhsDZy2Z0aqjoJ+qKpmw 312 | OGioOz7Fjq0nQiQ56TlNEAwZs3cak73afcg7kEulnybOiLj26gM2JWVmUMtWJZyU 313 | RO2av0J+9q9he6LgK09siyKTi62JAhwEEAEKAAYFAlOBIDAACgkQXXDS68rSyea5 314 | Rw//WRbJ+gCoi3UMVj45368vkdjUgwobra31zdNN9/uV75C04bt++7QgcVkemyKJ 315 | i6Y6+FIMEGjQF/LdoMcXXe6X6FxN8PO4Z29sM2iZr4Y7pxxbRmsWMPd5JWtSc72d 316 | PXaxOqtaXn78bLBy4fyG18DqkvrQaCS3KalwxuL+W9O0U1bUNx+jjbd/1th0SQ3v 317 | X4/VR9Pn3htsZRByUi6B4IsewYr8Yg320CgE6lrpv3xTbxwfGzp6hZpNG7V+EBb0 318 | wI/gm6Pt3g6988Duj1chGtDNwoWJymZGLRVF7vMHhu1sgyJ7yOEytSbWZ7PXc9JU 319 | HwsHf9uK/G0OCpgHL4H1XZ01CNgTv5pPSDwlWahevxZ3J8/XzAP2itZSjCkEGPhH 320 | 5GClH9LxB8yVkkx8ciQCFIBgjBoBwnh1uho0YAS0qxLtp4f/otRQYhS5R5MmYVKf 321 | Sju/SEPQ9QZ3pnff+/klHh/l6xpnBl3fxJKOPuU5u3N2CRAvmwJ3veBMuFmbnXUO 322 | M1qyeUmlE6op4H0tAJQ43jre9fpBYtce0PvaBobgbxJEhRQeO3u931ShVTCqhXY3 323 | llGGgOaD4tpSmcfPXN1uZVmsTpYcEOrEHUncLT0dBtXzbYQkBb8JEOs31jR+G5xY 324 | 3dMVDpkb5RdRJZwmDvPHJRgHS04w1Qs28L5euSJqGa3Gyla5AQ0ESqDtkAEIANri 325 | sUkUiqXdiTtTg1A7saaWGnGNSZHpsQ1BBxt9XKglU7hc+GpTcpeCPvKZrV9ovmB+ 326 | 1vXsO7hoI0czxI0zpvP2c9rnvnkHq6L3rqNIchWvWNwsYTTAhNxB+ALn3aH7gpp1 327 | H/1u088iL83nK8BT5cmYalp5sRKrvBWkfo1yvSbMdmW8vxIoTaAsEDlrXb9yODou 328 | SgW4JcX8ROz5mIKPQ2Kk5h8ZrJz4lBOBWGRtuty5dWapG8Fu+58AxJdbofrY2zrv 329 | mNXCsHnFm8lSLmNXijp81QvPJXVUiscyjhrt+zMuM+uRkwcjEHzAZ6GfhWRRmIgo 330 | k4Y/nPIvdwyEc2HjrI0AEQEAAYkCRAQYAQIADwIbAgUCUD4zcwUJCV+s4QEpwF0g 331 | BBkBAgAGBQJKoO2QAAoJEHSpQbohnsgQtBEH+QH/xtP9sc9EMB+fDegsf2aDHLT2 332 | 8YpvhfjLWVrYmXRiextcBRiCwLT6khulhA2vk4Tnh22dbhr87hUtuCJZGR5Y4E2Z 333 | S99KfAxXcu96Wo6Ki0X665G/QyUxoFYT9msYZzlv0OmbuIaED0p9lRlTlZrsDG96 334 | 9a/d30G8NG0Mv6CH/Sfqtq26eP3ITqHXe1zFveVTMIliBHaWGg9JqHiu/mm2MwUx 335 | uQAzLmaCtma5LXkGTUHsUruIdHplnqy7DHb3DC8mIjnVj9dvPrNXv54mxxhTwHkT 336 | 
5EPjFTzGZa6oFavYt+FzwPR67cVQXfz7jh6GktcqxrgA7KUmUwuaJ+DzGkIJEO6M 337 | vJ6Ibd2J/jcH/2y32VvUVKgkAJsNS+UC3zGG9xgCBasf4d+MtzAU6BM16p30hJGR 338 | jNYmbSIDtYt6FThlZWhmhPsECoYOsyWeqEzmfv7n0Tv8XZnxh2XZQH1BGrv2Ii5J 339 | xJRFDQEHVDbjAfU1LZgb4dNGmKDo80aAHp7U0vv96d/okFyvDTyY0FgTU6FFVEIm 340 | A/WwQiCv2h4QYZnRHNGH3FskIHeiC++eOwUSWzw47jsgH0Viyde7iTBmKjhU5Kbr 341 | ZqRvJ9HRtMlHFCODKAPFO/YhvNxAHeb00tXiYk5nPjywZ3pJ74gRAYkkTuGLuauE 342 | MZLDqmIRWAczo+h18ICQnM1RGVAp7lu5B8M= 343 | =QDQh 344 | -----END PGP PUBLIC KEY BLOCK----- 345 | -------------------------------------------------------------------------------- /tor/templates/notice.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | This is a Tor Router 4 | 5 |

This is a Tor Router

6 |

image/svg+xml



7 | 8 | Details about this relay can be found on atlas.torproject.org or globe.torproject.org! 9 | 10 | --------------------------------------------------------------------------------