├── .bumpversion.cfg ├── .gitignore ├── LICENSE ├── README.md ├── TODO.md ├── UPGRADING.md ├── ansible.cfg ├── archive ├── broken_clickhouse_repo.yml └── migrate_v1_to_v2.yml ├── bin ├── gcloud_sdb ├── kickstart ├── loader └── ycloud_vdb ├── clickhouse_migrations ├── .keep ├── 001-recreate-database.tmp ├── 002-channel-to-string.yml └── 003-scheme-to-string.yml ├── config_to_email.yml ├── convert_users.yml ├── filter_plugins ├── ips.py └── ownhtpass.py ├── google_cl.json ├── group_vars ├── all.yml └── wildcard.yml ├── install_roles.yml ├── inventory ├── .keep ├── all └── template-private.yml ├── keys └── support.pub ├── makefile ├── os_init.yml ├── os_upgrade.yml ├── platform.yml ├── requirements.txt ├── roles └── .keep ├── tasks ├── ch_migrate.yml ├── ch_migration_run.yml ├── check_dns.yml ├── setup_http_debug.yml ├── setup_logspout.yml ├── setup_metrics_server.yml └── setup_vpn_server.yml ├── templates ├── chproxy │ └── config.yml.j2 ├── facts.d │ └── config.fact.j2 ├── google-webmaster.j2 ├── nginx │ └── upstream-site.conf.j2 └── theia │ └── Dockerfile └── vars └── nginx.yml /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 3.10.3 3 | commit = True 4 | tag = False 5 | 6 | [bumpversion:file:bin/kickstart] 7 | 8 | [bumpversion:file:group_vars/all.yml] 9 | 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | inventory/* 2 | group_vars/* 3 | host_vars/* 4 | keys/* 5 | ovpn_keys/* 6 | __pycache__ 7 | /.vscode 8 | /temp_data 9 | /.mypy_cache 10 | *.pyc 11 | /install_log 12 | /files/.htpasswd.tools 13 | /files/users.yml 14 | /files/* 15 | /sync* 16 | /.env 17 | /tasks/custom* 18 | tasks/custom.yml 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018 Dmitry Rodin
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # Rockstat bootstrap
2 |
3 | ![Rockstat architecture](https://rstat.org/static/media/schemas/rockstat-main-components.svg?3)
4 |
5 | [Read more](https://rstat.org)
6 |
7 | ## Requirements
8 |
9 | A virtual or dedicated server with at least:
10 |
11 | - 2 cores
12 | - 8 GB RAM
13 | - 60 GB SSD
14 |
15 | Requires OS: Ubuntu 16.04.x
16 |
17 | **Caution!**
18 |
19 | Rockstat requires a fresh server without any other software.
Do not install Rockstat on an existing server running other software!
20 | The setup process performs significant system reconfiguration.
21 |
22 | ### Domain records
23 |
24 |
25 | A typical DNS zone looks like:
26 |
27 | ```
28 | stats.yourdomain A 1.2.3.4
29 | *.stats.yourdomain A 1.2.3.4
30 | ```
31 |
32 | For second-level domains:
33 |
34 | ```
35 | yourdomain (or @) A 1.2.3.4
36 | *.yourdomain A 1.2.3.4
37 | ```
38 |
39 | ### Start server
40 |
41 | The required steps depend on your hosting provider; see the provider-specific sections below.
42 |
43 | ### Local setup on server
44 |
45 | #### Connect to server
46 |
47 | Open a terminal application (on Windows known as `cmd`) and type the following connection command:
48 |
49 | ```bash
50 | ssh root@yourdomain
51 | ```
52 |
53 | #### Initial setup
54 |
55 | You only need to install curl once:
56 |
57 | ```bash
58 | sudo apt -qqy update && sudo apt -qqy install curl
59 | ```
60 |
61 | Then run:
62 |
63 | ```bash
64 | curl -s https://raw.githubusercontent.com/rockstat/bootstrap/master/bin/kickstart | sudo -E bash -
65 |
66 | sudo reboot
67 | ```
68 |
69 | For the **development** version:
70 |
71 | ```
72 | curl -s https://raw.githubusercontent.com/rockstat/bootstrap/dev/bin/kickstart | sudo -E BRANCH=dev bash -
73 |
74 | sudo reboot
75 | ```
76 |
77 | #### Upgrade / reconfigure installation
78 |
79 | Rockstat is under active development. Check the [What's new](https://rock.st/docs/what-s-new) page and use the latest version.
80 |
81 | To run the setup tool, just type `rockstat`.
82 |
83 | ### Direct ansible usage
84 |
85 | Configure the inventory:
86 |
87 | ```
88 | # ...
89 | test ansible_host=test.rstat.org realname=User email=hello@rstat.org
90 | ```
91 | Generate a password using `make password`, then execute the playbook:
92 |
93 | ```
94 | AHOST=test
95 | APASS='$apr1$G2B2.GYy$QiBhuOZeRC03moZTPsB561'
96 | ansible-playbook platform.yml --limit=$AHOST --tags=ssl,full -e admin_password=$APASS
97 | ```
98 |
99 | #### Custom tasks
100 |
101 | You should create `tasks/custom.yml`. It can be an empty file.
102 |
103 | ```shell
104 | touch tasks/custom.yml
105 | ```
106 |
107 | ## Params
108 |
109 | To force SSL, pass `-e ssl_force=1`.
110 |
111 | ## Overriding configuration
112 |
113 | Create a configuration for your hosts group in `group_vars/private.yml`.
114 | You can override the configuration by specifying alternative values.
115 |
116 | The configuration has parts prepared for easy overriding/extending:
117 |
118 | #### images_extra
119 |
120 | ```yaml
121 | _images_extra:
122 |   chproxy: myusername/chproxy
123 |   redis: redis:4-alpine
124 | ```
125 |
126 | This will override only these two images.
127 |
128 | #### Custom env
129 |
130 | ```yaml
131 | _containers_env_extra:
132 |   director:
133 |     PORT: 1899
134 | ```
135 |
136 | #### Disable support access
137 |
138 | ```yaml
139 | enable_support: no
140 | ```
141 |
142 | ### Google Cloud Compute Engine instance configuration
143 |
144 | You need to configure an additional persistent SSD disk.
145 |
146 | Use the docs at https://cloud.google.com/compute/docs/disks/add-persistent-disk
147 |
148 | Or execute the prepared script that configures the `/dev/sdb` disk. **Danger! If the disk is not currently mounted, it will be formatted!**
149 |
150 | ```
151 | curl -s https://raw.githubusercontent.com/rockstat/bootstrap/master/bin/gcloud_sdb | sudo bash -
152 | ```
153 |
154 | ### Yandex Cloud instance configuration
155 |
156 | Prepare an additional drive manually.
157 |
158 | Or execute the prepared script that configures the `/dev/vdb` disk. **Danger!
If the disk is not currently mounted, it will be formatted!**
159 |
160 | ```
161 | curl -s https://raw.githubusercontent.com/rockstat/bootstrap/master/bin/ycloud_vdb | sudo bash -
162 | ```
163 |
164 |
165 | ### IPv6
166 |
167 | By default, the setup tool disables IPv6 support.
168 | To prevent this, set `disable_ipv6` to `no` in your custom config.
169 |
170 | ## Community
171 |
172 | Join the community to discuss with other users:
173 |
174 | * Telegram https://t.me/rockstats
175 | * Facebook https://fb.com/rockstatX
176 |
177 | ## Rockstat Bootstrap License and Authors
178 |
179 | * Author:: Dmitry Rodin
180 | * Maintainer:: Ivan Golubenko
181 | * Maintainer:: Alexander Shvets
182 |
183 | Licensed under the Apache License, Version 2.0 (the "License");
184 | you may not use this file except in compliance with the License.
185 | You may obtain a copy of the License at
186 |
187 | http://www.apache.org/licenses/LICENSE-2.0
188 |
189 | Unless required by applicable law or agreed to in writing, software
190 | distributed under the License is distributed on an "AS IS" BASIS,
191 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
192 | See the License for the specific language governing permissions and
193 | limitations under the License.
194 |
-------------------------------------------------------------------------------- /TODO.md: --------------------------------------------------------------------------------
1 |
2 | - Before install, show a warning that Rockstat requires a dedicated server without other software and that data can be lost.
3 |
-------------------------------------------------------------------------------- /UPGRADING.md: --------------------------------------------------------------------------------
1 | # Migrating from v2
2 |
3 | ### Set up a server with v3
4 |
5 | See [README.md](readme)
6 |
7 | ### Configure nginx request redirection from the current server
8 |
9 | Add the following to `/etc/nginx/sites-enabled/tracker.conf` before the ` location / {` line.
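The repository stops short of the actual snippet here, so below is only a minimal sketch of what such a redirect block could look like. The location prefix `/track` and the host `new-stats.yourdomain` are placeholders for illustration; replace them with the paths and domain used by your own installation.

```nginx
# Hypothetical sketch: forward tracker traffic from the old v2 server to the
# new v3 host, preserving the original method, path, and query string (307).
location /track {
    return 307 https://new-stats.yourdomain$request_uri;
}
```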
10 | 11 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory 3 | host_key_checking = False 4 | retry_files_enabled = False 5 | remote_user = root 6 | # ##### facts 7 | # gathering = smart 8 | # fact_caching = redis 9 | # fact_caching_timeout = 86400 10 | # fact_caching = jsonfile 11 | # fact_caching_connection = ./temp_data/cache 12 | remote_tmp = /tmp/.ansible-${USER}/tmp 13 | 14 | [ssh_connection] 15 | ssh_args =-o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s 16 | pipelining = True 17 | 18 | -------------------------------------------------------------------------------- /archive/broken_clickhouse_repo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Removing old CH repo 3 | hosts: all 4 | remote_user: '{{support_user}}' 5 | become: yes 6 | vars: 7 | clickhouse_repo: "deb http://repo.yandex.ru/clickhouse/xenial stable main" 8 | tasks: 9 | - name: remove repo key 10 | apt_key: 11 | keyserver: keyserver.ubuntu.com 12 | id: E0C56BD4 13 | state: absent 14 | become: true 15 | 16 | - name: remove ClickHouse repo 17 | apt_repository: 18 | repo: "{{ clickhouse_repo }}" 19 | state: absent 20 | become: true 21 | -------------------------------------------------------------------------------- /archive/migrate_v1_to_v2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Migrating to alcolytics 2.0 3 | hosts: all 4 | remote_user: '{{support_user}}' 5 | become: yes 6 | vars: 7 | docker_service: docker.service 8 | old_anaconda_data_dir: /srv/jupyter 9 | old_ch_upload_dir: /srv/upload_ch 10 | tasks: 11 | 12 | - name: Checking data dir exists 13 | stat: 14 | path: '{{alco_data_dir}}' 15 | register: ex 16 | 17 | # - name: Checking migration done 18 | # meta: end_play 19 | # when: ex.stat.exists == True 20 | 21 | - name: remove anaconda container 22 | docker_container: 23 | name: anaconda 24 | state: absent 25 | 26 | - name: remove alco-tracker container 27 | docker_container: 28 | name: alco-tracker 29 | state: absent 30 | 31 | - name: Creating new alco home_dir 32 | file: 33 | path: '{{alco_home_dir}}' 34 | state: directory 35 | mode: 0755 36 | 37 | - name: Copying anaconda data to new location 38 | command: creates={{alco_data_dir}} cp -R {{old_anaconda_data_dir}} {{alco_data_dir}} 39 | 40 | - name: Copying alco-tracker upload data to new location 41 | command: creates={{alco_data_ch_upload_dir}} cp -R {{old_ch_upload_dir}} {{alco_data_ch_upload_dir}} 42 | 43 | -------------------------------------------------------------------------------- /bin/gcloud_sdb: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: curl -s https://raw.githubusercontent.com/rockstat/bootstrap/master/bin/gcloud_sdb | sudo bash - 3 | 4 | set -e 5 | 6 | DISK=/dev/sdb 7 | 8 | if [ ! -e $DISK ] ; then 9 | echo "Device $DISK not exists. Break." 10 | exit 1 11 | fi 12 | 13 | if grep $DISK /etc/mtab > /dev/null 2>&1; then 14 | echo "Disk already mounted. Skip formatting" 15 | else 16 | mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard $DISK 17 | chmod a+w /srv 18 | echo "Disk formatted." 19 | fi 20 | 21 | 22 | SDBID=`blkid $DISK | cut -d' ' -f2 | cut -d\" -f2` 23 | 24 | 25 | if grep $SDBID /etc/fstab > /dev/null 2>&1; then 26 | echo "Record already in fstab." 
27 | grep -v "/srv" /etc/fstab > temp && mv temp /etc/fstab 28 | chmod 0744 /etc/fstab 29 | chown root:root /etc/fstab 30 | fi 31 | 32 | 33 | cp /etc/fstab /etc/fstab.backup 34 | echo -e "UUID=$SDBID /srv ext4 discard,defaults,nofail 0 2\n" >> /etc/fstab 35 | echo "Record added to fstab." 36 | 37 | echo "Done! You HAVE TO reboot server. Type 'sudo reboot' and press enter" -------------------------------------------------------------------------------- /bin/kickstart: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | VERSION=3.10.3 4 | PLATFORM_HOME=/srv/platform 5 | BOOTSTRAP_BRANCH=${BRANCH:-master} 6 | BOOTSTRAP_REPO=https://github.com/rockstat/bootstrap.git 7 | BOOTSTRAP_DIR=/srv/platform/bootstrap 8 | BOOTSTRAP_BINDIR=${BOOTSTRAP_DIR}/bin 9 | KICKSTART=${BOOTSTRAP_BINDIR}/kickstart 10 | KICKSTART_CMD="curl -s https://raw.githubusercontent.com/rockstat/bootstrap/${BOOTSTRAP_BRANCH}/bin/kickstart | sudo -E bash -\n" 11 | BINALIAS=/usr/local/bin/rockstat 12 | REPORT_COLLECTOR=https://bolt.rstat.org/upload 13 | INSTALL_LOG=$(mktemp /tmp/platform-setup.XXXXXXXX) 14 | FACT_CONF=/etc/ansible/facts.d/config.fact 15 | CUSTOM_TASKS_FILE=tasks/custom.yml 16 | 17 | # state vars 18 | DOMAIN="" 19 | FULLNAME="" 20 | EMAIL="" 21 | ADMIN_PASSWORD="" 22 | DEF_HOSTALIAS="rstat" 23 | HOSTALIAS="$DEF_HOSTALIAS" 24 | ENABLE_SUPPORT_DEF="1" 25 | ENABLE_SUPPORT="$ENABLE_SUPPORT_DEF" 26 | SSL_CHALLENGE_DEF="http" 27 | SSL_CHALLENGE="$SSL_CHALLENGE_DEF" 28 | INSTALLED="" 29 | 30 | # setup state vars 31 | ROLES_UPDATED=0 32 | 33 | # Preventing "bash: event not found" error 34 | set +H 35 | # Any subsequent(*) commands which fail will cause the shell script to exit immediately 36 | set -e 37 | 38 | # set -e 39 | # set -o pipefail 40 | 41 | ################################################################################ 42 | # 2. Library 43 | ################################################################################ 44 | 45 | umask 022 46 | export DEBIAN_FRONTEND=noninteractive 47 | 48 | if test -t 1; then # if terminal 49 | ncolors=$(which tput > /dev/null && tput colors) # supports color 50 | if test -n "$ncolors" && test $ncolors -ge 8; then 51 | termcols=$(tput cols) 52 | bold="$(tput bold)" 53 | underline="$(tput smul)" 54 | standout="$(tput smso)" 55 | normal="$(tput sgr0)" 56 | black="$(tput setaf 0)" 57 | red="$(tput setaf 1)" 58 | green="$(tput setaf 2)" 59 | yellow="$(tput setaf 3)" 60 | blue="$(tput setaf 4)" 61 | magenta="$(tput setaf 5)" 62 | cyan="$(tput setaf 6)" 63 | white="$(tput setaf 7)" 64 | fi 65 | fi 66 | 67 | base_print() { 68 | echo -e "\n$1\n" 69 | } 70 | 71 | print_status() { 72 | base_print "${cyan}${bold}>> ${blue}${1}${normal}" 73 | } 74 | 75 | print_error() { 76 | base_print "${red}${bold}!> ${1}${normal}" 77 | } 78 | 79 | print_ok() { 80 | echo "${cyan}${bold}>> ${normal}Ok" 81 | } 82 | 83 | 84 | ### DEPRECATED 85 | 86 | # check credentials 87 | print_error "Master version is too old and deprecated. 
Switch to the 'dev' branch and see README.md for more information";
88 | print_status "https://github.com/rockstat/bootstrap/tree/dev";
89 | exit 1;
90 |
91 |
92 |
93 |
94 | ################################################################################
95 | ## Request root and banner
96 | ################################################################################
97 |
98 | clear
99 | base_print """
100 | ${cyan}${bold}##${normal}
101 | ${cyan}${bold}## Rockstat Bootstrap${normal}
102 | ${cyan}${bold}##${normal}
103 | ${cyan}${bold}## ${normal}version ${VERSION}
104 | ${cyan}${bold}## ${normal}branch ${BOOTSTRAP_BRANCH}
105 | ${cyan}${bold}## ${normal}logs output $INSTALL_LOG
106 | ${cyan}${bold}##${normal}"""
107 |
108 | ################################################################################
109 | ## Checks
110 | ################################################################################
111 |
112 | # check credentials
113 | if [ ! "$UID" -eq 0 ]; then print_error "Run as root or insert 'sudo -E' before 'bash'"; exit 1; fi
114 | # check debian-like
115 | if [[ ! -f /etc/debian_version ]]; then print_error "Target OS is Ubuntu 16.04"; exit 1; fi
116 |
117 | ################################################################################
118 | ## System deps
119 | ################################################################################
120 |
121 | update_bootstrap() {
122 |   set -e
123 |   if [[ -d "$BOOTSTRAP_DIR" ]]; then
124 |     print_status "Updating repo $BOOTSTRAP_REPO"
125 |     cd $BOOTSTRAP_DIR
126 |     git fetch --all && git reset --hard "origin/$BOOTSTRAP_BRANCH" && print_ok
127 |     # git checkout $BOOTSTRAP_BRANCH && git pull --rebase
128 |   else
129 |     print_status "Cloning repo $BOOTSTRAP_REPO"
130 |     mkdir -p $BOOTSTRAP_DIR
131 |     cd $BOOTSTRAP_DIR
132 |     echo $(pwd)
133 |     git clone $BOOTSTRAP_REPO .
&& git checkout $BOOTSTRAP_BRANCH && print_ok 134 | fi 135 | } 136 | 137 | setup_langvars() { 138 | set -e 139 | print_status "Exporting locale vars" 140 | export LANG=en_US.UTF-8 141 | export LC_ALL=en_US.UTF-8 142 | export LANGUAGE=en_US.UTF-8 143 | export LC_TYPE=en_US.UTF-8 144 | print_ok 145 | } 146 | 147 | setup_locale() { 148 | export DEBIAN_FRONTEND=noninteractive 149 | set -e 150 | print_status "Setting en_US.UTF-8 locale" 151 | # echo -e 'LANGUAGE=en_US.UTF-8\nLANG=en_US.UTF-8\nLC_ALL=en_US.UTF-8\nLC_TYPE=en_US.UTF-8' > /etc/default/locale 152 | echo -e '# Generated by Rockstat setup script\nLANGUAGE=en_US.UTF-8\nLANG=en_US.UTF-8\nLC_ALL=en_US.UTF-8\nLC_TYPE=en_US.UTF-8' | tee /etc/default/locale 153 | locale-gen en_US.UTF-8 > $INSTALL_LOG 154 | dpkg-reconfigure locales > $INSTALL_LOG 155 | setup_langvars 156 | print_ok 157 | } 158 | 159 | update_apt_repo(){ 160 | export DEBIAN_FRONTEND=noninteractive 161 | print_status "Updating packages registry" 162 | apt-get -yqq update \ 163 | && print_ok 164 | } 165 | 166 | setup_system_packages() { 167 | export DEBIAN_FRONTEND=noninteractive 168 | print_status "Installing requirements" 169 | apt-get -yqq install apt-utils > $INSTALL_LOG \ 170 | && apt-get -yqq install dialog whiptail nano \ 171 | curl git locales \ 172 | python3 python3-dev python3-pip python3-netaddr python3-setuptools python3-requests \ 173 | build-essential libffi-dev ca-certificates zlib1g-dev libssl-dev openssl > $INSTALL_LOG \ 174 | && print_ok 175 | } 176 | 177 | setup_python_packages() { 178 | print_status "Installing required python packages" 179 | pip3 -q install wheel \ 180 | && pip3 -q install -r $BOOTSTRAP_DIR/requirements.txt -U \ 181 | && print_ok 182 | } 183 | 184 | setup_runner() { 185 | print_status "Installing/Updating rockstat shortcut" 186 | rm -f $BINALIAS \ 187 | && echo -e "#!/usr/bin/env bash\n${KICKSTART_CMD}\n" > $BINALIAS \ 188 | && chmod +x $BINALIAS \ 189 | && print_ok 190 | } 191 | 192 | setup_playbook() { 193 | if [ $ROLES_UPDATED -eq 1 ]; then return 0; fi 194 | print_status "Updating ansible roles" \ 195 | && cd $BOOTSTRAP_DIR \ 196 | && ansible-galaxy install -r install_roles.yml --force > $INSTALL_LOG \ 197 | && touch $CUSTOM_TASKS_FILE \ 198 | && print_ok \ 199 | && ROLES_UPDATED=1 200 | } 201 | 202 | ################################################################################ 203 | ## Dialogs 204 | ################################################################################ 205 | 206 | # whiptail --title "Radio list example" --radiolist \ 207 | # "Choose user's permissions" 20 78 4 \ 208 | # "NET_OUTBOUND" "Allow connections to other hosts" ON \ 209 | # "NET_INBOUND" "Allow connections from other hosts" OFF \ 210 | # "LOCAL_MOUNT" "Allow mounting of local devices" OFF \ 211 | # "REMOTE_MOUNT" "Allow mounting of remote devices" OFF 212 | 213 | show_dialog(){ 214 | whiptail --title "$1" --msgbox "$2" 8 78 215 | } 216 | 217 | whiptailInput() { 218 | eval local init="\$$1" 219 | case "$1" in 220 | *PASSWORD*) local prompt='passwordbox'; showval="" ;; 221 | *) local prompt='inputbox'; showval=$init; ;; 222 | esac 223 | local value=$(whiptail --title "$2" --${prompt} "$3" $4 $5 $showval 3>&1 1>&2 2>&3) 224 | local rc=$? 225 | if [ $rc = 0 ]; then 226 | if [ $prompt == 'passwordbox' ]; then 227 | local confirmation=$(whiptail --title "$2 / confirmation" --${prompt} "$3" $4 $5 $showval 3>&1 1>&2 2>&3) 228 | local rc=$? 
229 |       if [ $rc = 0 ]; then
230 |         if [ "$value" != "$init" ]; then
231 |           if [ $value == $confirmation ]; then
232 |             if [[ -n ${value// } ]]; then
233 |               enc=$(openssl passwd -apr1 ${value})
234 |               eval $1="'$enc'"
235 |               save_config
236 |             fi
237 |           fi
238 |         fi
239 |       fi
240 |     else
241 |       eval $1="'$value'"
242 |       save_config
243 |     fi
244 |   fi
245 | }
246 |
247 | request_domain(){
248 |   whiptailInput "DOMAIN" "Domain" "Please enter the domain for your tracker." 8 78
249 | }
250 |
251 | request_hostalias(){
252 |   whiptailInput "HOSTALIAS" "Short hostname" "Short server hostname that you will see at the command line prompt." 8 78
253 | }
254 |
255 | request_email(){
256 |   whiptailInput "EMAIL" "Email" "Email is required for issuing the letsencrypt SSL certificate and for git." 8 78
257 | }
258 |
259 | request_fullname(){
260 |   whiptailInput "FULLNAME" "Full name" "Provide your name. It will be used in the git configuration." 8 78
261 | }
262 |
263 | request_password(){
264 |   whiptailInput "ADMIN_PASSWORD" "Admin password" "Provide a secret password for the admin user." 8 78
265 | }
266 |
267 | update_reboot_dialog(){
268 |   show_dialog "System upgrade" "After the upgrade completes, the server will be rebooted and you will need to connect again to continue."
269 | }
270 |
271 | greate_success_dialog(){
272 |   show_dialog "Execution completed" "Great success!"
273 | }
274 |
275 | run_full_setup_to_apply_dialog(){
276 |   show_dialog "Applying changes" "Run a full install/upgrade to apply the changes"
277 | }
278 |
279 | ################################################################################
280 | ## Actions
281 | ################################################################################
282 |
283 | ANS_PY="-e ansible_python_interpreter=/usr/bin/python3"
284 | ANS_BRANCH="-e branch=${BOOTSTRAP_BRANCH}"
285 |
286 |
287 | run_platform_playbook() { # (tags, custom)
288 |   setup_playbook
289 |   print_status "Starting ansible"
290 |   cmd="platform.yml --connection=local --tags=${1} $ANS_PY $ANS_BRANCH ${2}"
291 |   echo "executing ansible-playbook ${cmd}"
292 |   ansible-playbook $cmd
293 |   if [ $? -eq 0 ]; then print_status "Done"; else print_error "FAILED"; exit 1; fi
294 |   greate_success_dialog
295 | }
296 |
297 | run_upgrade_playbook() {
298 |   print_status "Starting ansible"
299 |   ansible-playbook os_upgrade.yml --connection=local $ANS_PY && exit 0
300 |   if [ $?
-eq 0 ]; then print_status "Done"; else print_error "FAILED"; exit 1; fi 301 | # server shold be go to reboot 302 | } 303 | 304 | switch_support_value(){ 305 | ENABLE_SUPPORT=$([ "$ENABLE_SUPPORT" == "1" ] && echo "0" || echo "1") 306 | save_config 307 | run_full_setup_to_apply_dialog 308 | } 309 | 310 | switch_ssl_challenge(){ 311 | SSL_CHALLENGE=$([ "$SSL_CHALLENGE" == "dns" ] && echo "http" || echo "dns") 312 | save_config 313 | run_full_setup_to_apply_dialog 314 | } 315 | 316 | ################################################################################ 317 | ## Configs 318 | ################################################################################ 319 | 320 | load_config(){ 321 | if [ -a "$FACT_CONF" ]; then 322 | INSTALLED=$(awk -F "=" '/installed/ {print $2}' $FACT_CONF) 323 | DOMAIN=$(awk -F "=" '/domain/ {print $2}' $FACT_CONF) 324 | EMAIL=$(awk -F "=" '/email/ {print $2}' $FACT_CONF) 325 | ADMIN_PASSWORD=$(awk -F "=" '/admin_password/ {print $2}' $FACT_CONF) 326 | FULLNAME=$(awk -F "=" '/fullname/ {print $2}' $FACT_CONF) 327 | HOSTALIAS=$(awk -F "=" '/hostalias/ {print $2}' $FACT_CONF) 328 | ENABLE_SUPPORT=$(awk -F "=" '/enable_support/ {print $2}' $FACT_CONF) 329 | SSL_CHALLENGE=$(awk -F "=" '/ssl_challenge/ {print $2}' $FACT_CONF) 330 | if [[ -z "$HOSTALIAS" ]]; then HOSTALIAS=$DEF_HOSTALIAS; fi 331 | if [[ -z "$ENABLE_SUPPORT" ]]; then ENABLE_SUPPORT=$ENABLE_SUPPORT_DEF; fi 332 | if [[ -z "$SSL_CHALLENGE" ]]; then SSL_CHALLENGE=$SSL_CHALLENGE_DEF; fi 333 | fi 334 | } 335 | 336 | save_inventory(){ 337 | cd "$BOOTSTRAP_DIR" 338 | echo """ 339 | [private] 340 | ${HOSTALIAS} ansible_host=${DOMAIN} 341 | 342 | [rockstat] 343 | ${HOSTALIAS} 344 | """ > inventory/private 345 | } 346 | 347 | save_config(){ 348 | save_inventory \ 349 | && mkdir -p $(dirname $FACT_CONF) \ 350 | && echo """[general] 351 | domain=${DOMAIN} 352 | email=${EMAIL} 353 | admin_password=${ADMIN_PASSWORD} 354 | hostalias=${HOSTALIAS} 355 | fullname=${FULLNAME} 356 | enable_support=${ENABLE_SUPPORT} 357 | ssl_challenge=${SSL_CHALLENGE} 358 | installed=${INSTALLED}""" > $FACT_CONF 359 | } 360 | 361 | ################################################################################ 362 | ## Executing 363 | ################################################################################ 364 | 365 | setup_platform(){ 366 | setup_langvars 367 | update_apt_repo 368 | setup_system_packages 369 | setup_locale 370 | update_bootstrap 371 | setup_python_packages 372 | setup_runner 373 | INSTALLED=$VERSION 374 | save_config 375 | } 376 | 377 | update_platform(){ 378 | update_bootstrap 379 | setup_runner 380 | } 381 | 382 | initialize(){ 383 | while [ -z "${DOMAIN// }" ]; do request_domain 384 | done 385 | while [ -z "${EMAIL// }" ]; do request_email 386 | done 387 | while [ -z ${ADMIN_PASSWORD// } ]; do request_password 388 | done 389 | while [ -z ${FULLNAME// } ]; do request_fullname 390 | done 391 | while [ -z ${HOSTALIAS// } ]; do request_hostalias 392 | done 393 | } 394 | 395 | if [[ -a "$FACT_CONF" ]]; then 396 | print_status "Reading local configuration" 397 | load_config 398 | fi 399 | 400 | print_status "Preparing system" 401 | if [ "$INSTALLED" == "$VERSION" ]; then update_platform; fi 402 | while [ "$INSTALLED" != "$VERSION" ]; do setup_platform 403 | done 404 | 405 | 406 | MENU_TEXT="\nChoose an option:\n" 407 | 408 | menu() { 409 | # --menu 410 | SUPPORT_STATE=$([ "$ENABLE_SUPPORT" == "1" ] && echo "Disable" || echo "Enable") 411 | SSL_CHALLENGE_STATE=$([ "$SSL_CHALLENGE" == "dns" ] && echo || echo ) 
412 | case "$SSL_CHALLENGE" in 413 | "dns") SSL_CHALLENGE_STATE="DNS (wildcard)" ;; 414 | "http") SSL_CHALLENGE_STATE="HTTP" ;; 415 | esac 416 | OPTION=$(whiptail --title "Rockstat Shell Script Menu" --menu "${MENU_TEXT}" 30 60 18 \ 417 | "01" " Upgrade OS" \ 418 | "03" " Full Install/Upgrade" \ 419 | "04" " Platform Upgrade" \ 420 | "11" " Change admin Password" \ 421 | "12" " Change domain '${DOMAIN}'" \ 422 | "13" " Change host alias '${HOSTALIAS}'" \ 423 | "14" " Change Email '${EMAIL}'" \ 424 | "15" " Change full name '${FULLNAME}'" \ 425 | "20" " Apply admin password to webserver" \ 426 | "30" " Setup OpenVPN server" \ 427 | "34" " Setup Demo-site" \ 428 | "31" " Request SSL certificate with force option" \ 429 | "32" " $SUPPORT_STATE remote upgrade and support access" \ 430 | "33" " SSL challenge: ${SSL_CHALLENGE_STATE}" \ 431 | "00" " Exit" 3>&1 1>&2 2>&3) 432 | EXITCODE=$? 433 | [[ "$EXITCODE" = 1 ]] && break; 434 | [[ "$EXITCODE" = 255 ]] && break; 435 | # echo "exitcode: $EXITCODE" 436 | 437 | case "$OPTION" in 438 | "01") update_reboot_dialog; run_upgrade_playbook ;; 439 | "03") run_platform_playbook full ;; 440 | "04") run_platform_playbook pservice,ppart ;; 441 | "11") request_password ;; 442 | "12") request_domain ;; 443 | "13") request_hostalias ;; 444 | "14") request_email ;; 445 | "15") request_fullname ;; 446 | "20") run_platform_playbook nginx ;; 447 | "30") run_platform_playbook ovpn-server ;; 448 | "31") run_platform_playbook "ssl,httpd" "-e ssl_force=1" ;; 449 | "32") switch_support_value ;; 450 | "33") switch_ssl_challenge ;; 451 | "34") run_platform_playbook fakeshop ;; 452 | "00") exit 0 ;; 453 | *) echo "Unknown action '${OPTION}'" ;; 454 | esac 455 | # sleep 0.5 456 | } 457 | 458 | # initial configuration 459 | initialize 460 | # start menu loop 461 | while [ 1 ]; do 462 | menu 463 | done 464 | -------------------------------------------------------------------------------- /bin/loader: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if test -t 1; then # if terminal 4 | ncolors=$(which tput > /dev/null && tput colors) # supports color 5 | if test -n "$ncolors" && test $ncolors -ge 8; then 6 | termcols=$(tput cols) 7 | bold="$(tput bold)" 8 | underline="$(tput smul)" 9 | standout="$(tput smso)" 10 | normal="$(tput sgr0)" 11 | black="$(tput setaf 0)" 12 | red="$(tput setaf 1)" 13 | green="$(tput setaf 2)" 14 | yellow="$(tput setaf 3)" 15 | blue="$(tput setaf 4)" 16 | magenta="$(tput setaf 5)" 17 | cyan="$(tput setaf 6)" 18 | white="$(tput setaf 7)" 19 | fi 20 | fi 21 | 22 | base_print() { 23 | echo -e "\n$1\n" 24 | } 25 | 26 | print_status() { 27 | base_print "${cyan}${bold}>> ${blue}${1}${normal}" 28 | } 29 | 30 | print_pref() { 31 | base_print "${yellow}${bold}!> ${normal}${1}" 32 | } 33 | 34 | print_pref "##" 35 | print_pref "##" 36 | print_pref "##" 37 | print_pref "## Current installer is deprecated." 38 | -------------------------------------------------------------------------------- /bin/ycloud_vdb: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: curl -s https://raw.githubusercontent.com/rockstat/bootstrap/master/bin/ycloud_vdb | sudo bash - 3 | 4 | set -e 5 | 6 | DISK=/dev/vdb 7 | 8 | if [ ! -e $DISK ] ; then 9 | echo "Device $DISK not exists. Break." 10 | exit 1 11 | fi 12 | 13 | if grep $DISK /etc/mtab > /dev/null 2>&1; then 14 | echo "Disk already mounted. 
Skip formatting" 15 | else 16 | mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard $DISK 17 | chmod a+w /srv 18 | echo "Disk formatted." 19 | fi 20 | 21 | SDBID=`blkid $DISK | cut -d' ' -f2 | cut -d\" -f2` 22 | 23 | if grep $SDBID /etc/fstab > /dev/null 2>&1; then 24 | echo "Record already in fstab. Removing..." 25 | grep -v "/srv" /etc/fstab > temp && mv temp /etc/fstab 26 | chmod 0744 /etc/fstab 27 | chown root:root /etc/fstab 28 | fi 29 | 30 | cp /etc/fstab /etc/fstab.backup 31 | echo -e "UUID=$SDBID /srv ext4 defaults,nofail 0 2\n" >> /etc/fstab 32 | echo "Record added to fstab." 33 | 34 | 35 | echo "Done! You HAVE TO reboot server. Type 'sudo reboot' and press enter" -------------------------------------------------------------------------------- /clickhouse_migrations/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rockstat/bootstrap/4e974e3ad79dc337a940fac13536d9cd7a0bffa7/clickhouse_migrations/.keep -------------------------------------------------------------------------------- /clickhouse_migrations/001-recreate-database.tmp: -------------------------------------------------------------------------------- 1 | - DROP DATABASE IF EXISTS stats 2 | - CREATE DATABASE IF NOT EXISTS stats 3 | - CREATE TABLE IF NOT EXISTS stats.migrations (name String) ENGINE = Log 4 | -------------------------------------------------------------------------------- /clickhouse_migrations/002-channel-to-string.yml: -------------------------------------------------------------------------------- 1 | - ALTER TABLE stats.events MODIFY COLUMN channel String 2 | - ALTER TABLE stats.activity MODIFY COLUMN channel String 3 | - ALTER TABLE stats.webhooks MODIFY COLUMN channel String 4 | -------------------------------------------------------------------------------- /clickhouse_migrations/003-scheme-to-string.yml: -------------------------------------------------------------------------------- 1 | - ALTER TABLE stats.events MODIFY COLUMN page_scheme String 2 | - ALTER TABLE stats.activity MODIFY COLUMN page_scheme String 3 | - ALTER TABLE stats.webhooks MODIFY COLUMN page_scheme String 4 | -------------------------------------------------------------------------------- /config_to_email.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Sending letters 3 | hosts: all 4 | remote_user: '{{support_user}}' 5 | tasks: 6 | - import_role: 7 | name: alco-pechkin 8 | vars: 9 | amail_ovpn_keys_dir: '{{alco_ovpn_keys_dir}}' 10 | amail_sender: '{{mailgun_sender}}' 11 | amail_subj_key: '{{alco_email_subj_key}}' 12 | amail_subj_snip: '{{alco_email_subj_snip}}' 13 | amail_enabled: '{{alco_email_enabled}}' 14 | amail_mailgun_key: '{{mailgun_api_key}}' 15 | amail_to: '{{contact_email}}' 16 | -------------------------------------------------------------------------------- /convert_users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Installing platform 3 | hosts: rockstat 4 | become: yes 5 | vars_files: 6 | - vars/config.yml 7 | vars: 8 | _nginx_users: {} 9 | _nginx_admin: {} 10 | tasks: 11 | - block: 12 | - command: 'cat /srv/platform/bootstrap/files/users.yml' 13 | register: users_out 14 | failed_when: 'False' 15 | - block: 16 | - debug: var=pb_users 17 | - debug: var=users_data 18 | - debug: msg="loc_gen:{{loc_gen}}" 19 | - debug: msg="loc_conf:{{loc_conf}}" 20 | - debug: msg="loc_admpwd:{{loc_admpwd}}" 21 | - debug: 
msg="pb_woadmin:{{pb_notadmin}}" 22 | - debug: msg="pb_admin:{{pb_admin}}" 23 | - debug: var=loc_users_raw 24 | - debug: var=loc_users_xx 25 | - debug: msg="loc_users:{{loc_users}}" 26 | - set_fact: 27 | _nginx_users: "{{ _nginx_users | combine({row.0: row.0+':'+row.1}) }}" 28 | vars: 29 | row: "{{item.split(':')}}" 30 | loop: "{{users_list|flatten}}" 31 | - set_fact: 32 | _nginx_admin: "{{ _nginx_admin | combine({row.0: row.1}) }}" 33 | vars: 34 | row: "{{item.split(':')}}" 35 | loop: "{{admin_list|flatten}}" 36 | - file: 37 | path: /etc/ansible/facts.d 38 | state: directory 39 | - template: 40 | src: 'facts.d/config.fact.j2' 41 | dest: /etc/ansible/facts.d/config.fact 42 | vars: 43 | _facts_dict: "{{ loc_gen|combine({'admin_password': (_nginx_admin.values()|list)[0]})}}" 44 | vars: 45 | users_data: "{{ (users_out.rc == 0) | ternary(users_out.stdout|from_yaml, {}) }}" 46 | pb_users: "{{ users_data.iternal_users | default(users_data.users|default([])) }}" 47 | loc_conf: "{{ ansible_local.config | default({}) }}" 48 | loc_gen: "{{ loc_conf.general | default({}) }}" 49 | loc_admpwd: "{{ loc_gen.admin_password | default('') }}" 50 | loc_adm: "{{ (loc_admpwd != '')|ternary(['admin:'+loc_admpwd], []) }}" 51 | init_adm: "{{ ['admin:'+init_admin_pass|apr1pass] if init_admin_pass is defined else [] }}" 52 | loc_users_raw: "{{ loc_gen.users|default('') }}" 53 | loc_users: "{{ (loc_users_raw == '')|ternary(' [] ', loc_users_raw)|from_json }}" 54 | pb_notadmin: "{{ pb_users | select('match', '^(?!admin).*$') | list }}" 55 | pb_admin: "{{ pb_users | select('match', '^admin.*$') | list }}" 56 | users_list: 57 | - "{{loc_users}}" 58 | - "{{pb_notadmin}}" 59 | admin_list: 60 | - "{{init_adm}}" 61 | - "{{ 'admin:'+((users_list|flatten)[0].split(':'))[1] }}" 62 | - "{{pb_admin}}" 63 | - "{{loc_adm}}" 64 | 65 | # when: "http_users|length > 0" 66 | 67 | -------------------------------------------------------------------------------- /filter_plugins/ips.py: -------------------------------------------------------------------------------- 1 | from __future__ import (unicode_literals, absolute_import, division, print_function) 2 | import six 3 | import netaddr 4 | 5 | def ip_pattern_to_net(pattern): 6 | bits = 32 - str(pattern).count('*') * 8 7 | lostbits = 24 - str(pattern).count('.') * 8 8 | network = str.replace(str(pattern), '*', '0') + ('.0' * int(round(lostbits/8))) 9 | return "%s/%s" % (network, bits - lostbits) 10 | 11 | def net_gateway(netstr): 12 | net = netaddr.IPNetwork(str(netstr)) 13 | return "%s" % (net[1]) 14 | 15 | 16 | class FilterModule (object): 17 | def filters(self): 18 | return { 19 | "ip_p2n": ip_pattern_to_net, 20 | "net_gw": net_gateway 21 | } 22 | -------------------------------------------------------------------------------- /filter_plugins/ownhtpass.py: -------------------------------------------------------------------------------- 1 | from __future__ import (unicode_literals, absolute_import, division, print_function) 2 | import six 3 | from passlib.apache import HtpasswdFile, CryptContext 4 | 5 | myctx = CryptContext(schemes=["apr_md5_crypt"]) 6 | 7 | def apr1pass(orig): 8 | return myctx.hash(orig) 9 | 10 | 11 | class FilterModule (object): 12 | def filters(self): 13 | return { 14 | "apr1pass": apr1pass 15 | } 16 | -------------------------------------------------------------------------------- /google_cl.json: -------------------------------------------------------------------------------- 1 | 
{"installed":{"client_id":"1003368909379-7jl7kn15bj2ec3kf2b15vtf1bntidcf2.apps.googleusercontent.com","project_id":"digitalgod-222221","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"kpUP_EtPdsx6TRWfV-wR2bjI","redirect_uris":["urn:ietf:wg:oauth:2.0:oob","http://localhost"]}} -------------------------------------------------------------------------------- /group_vars/all.yml: -------------------------------------------------------------------------------- 1 | # playbook version fill automatically 2 | playbook_version: 3.10.3 3 | branch: master 4 | # ########## google site verifications file name ########## 5 | _google_webmaster: "" # like google65679391aa745584.html 6 | # ########## Features ########## 7 | setup_server: yes 8 | setup_nginx: yes 9 | _setup_ssl: yes 10 | setup_jupyter: yes 11 | setup_theia: yes 12 | metrics_server: no 13 | # will runned by director at first start 14 | _initial_startup: [mmgeo, sxgeo, uaparser] 15 | # sysuser 16 | rsys_creds: [ "rsys", "" ] 17 | ruser_creds: [ "ruser", "" ] 18 | # main user. (temp) 19 | main_user: default 20 | main_user_password: default 21 | # system users 22 | support_user: 'x011' 23 | _uid: 472 24 | _gid: 472 25 | remote_user: "{{ support_user }}" 26 | #key for update manager to access server 27 | support_key_file: './keys/support.pub' 28 | support_key: "{{ lookup('file', support_key_file) }}" 29 | # ########## emitting configuration ########## 30 | customs: "{{_customs|default({})}}" 31 | _local_config: "{{ (ansible_local.config|default({})).general|default({}) }}" 32 | _random_pass: "{{ lookup('lines', 'sh -c \"openssl rand -base64 18 | openssl passwd -apr1 -stdin\"') }}" 33 | _fullname: "{{ realname|default(_local_config.fullname|default('Aldous Huxley'))}}" 34 | _email: "{{ email|default(_local_config.email|default('hello@example.com'))}}" 35 | _admin_password: "{{admin_password|default(_local_config.admin_password|default(_random_pass))}}" 36 | _enable_support: "{{enable_support|default(_local_config.enable_support|default('1')|bool)}}" 37 | _dns_challenge: "{{_local_config.dns_challenge|default('http')}}" 38 | _ssl_wildcard: "{{ssl_wildcard|default(_dns_challenge == 'dns')|bool}}" 39 | _tag: "{{ (branch == 'master')|ternary('latest', branch) }}" 40 | # ########## Domains ########## 41 | _domain: "{{_local_config.domain|default(ansible_host)}}" 42 | _subdomains_default: [app, web, grafana, jupyter, theia, netdata, demo, chproxy] 43 | _subdomains: "{{ (_subdomains_default + _subdomains_extra|default([])) }}" 44 | _domains: "{{ _subdomains | map('regex_replace', '^(.*)$', '\\1.' 
+ _domain)|list }}" 45 | _domains_extra: [ ] 46 | # ########## SSL ########## 47 | domains_ssl: 48 | - email: '{{email}}' 49 | domains: "{{[_domain] + _domains.values()|list|unique}}" 50 | # ########## Repos ########## 51 | _git_organization: https://github.com/rockstat 52 | repos: 53 | band_set: "{{_git_organization}}/band-services-set" 54 | dashboard: "{{_git_organization}}/dashboard.git" 55 | theia: "{{_git_organization}}/theia-ide-image.git" 56 | splash: "{{_git_organization}}/splash.git" 57 | # ########## Images ########## 58 | _images: 59 | band: "rockstat/band-base-py:{{_tag}}" 60 | director: "rockstat/director:{{_tag}}" 61 | front: "rockstat/front:{{_tag}}" 62 | chwriter: "rockstat/chwriter:{{_tag}}" 63 | ebaloger: rockstat/ebaloger 64 | theia: "rockstat/theia-ide:{{_tag}}" 65 | heavyload: rockstat/heavyload 66 | anaconda: rockstat/anaconda 67 | httpdebug: rockstat/httpdebug 68 | logspout: gliderlabs/logspout 69 | chproxy: rockstat/chproxy 70 | redis: redis:4-alpine 71 | netdata: netdata/netdata 72 | grafana: rockstat/grafana 73 | fakeshop: madiedinro/fake-shop 74 | images: "{{_images|combine(_images_extra|default({}))}}" 75 | # ########## Logs ########## 76 | _loggly_hostname: logs-01.loggly... 77 | _loggly_api_key: 10bbf2d8... 78 | _papertrail_hostname: logs7.pape.... 79 | _logspout_target: none 80 | _logspout_enabled: yes 81 | # ########## dirs ########## 82 | home_dir: '/srv/platform' 83 | data_dir: '{{home_dir}}/data' 84 | etc_dir: '{{home_dir}}/etc' 85 | build_dir: '{{home_dir}}/build' 86 | images_dir: "{{home_dir}}/images" 87 | libraries_dir: "{{home_dir}}/libraries" 88 | dirs: 89 | etc_dir: "{{etc_dir}}" 90 | front_custom_config: "{{data_dir}}/front_custom_config" 91 | chwriter_custom_config: "{{data_dir}}/chwriter_custom_config" 92 | chwriter_custom_migrations: "{{data_dir}}/chwriter_custom_migrations" 93 | chwriter_emergency: "{{data_dir}}/chwriter_emergency" 94 | band: "{{images_dir}}/band_base" 95 | band_set: "{{images_dir}}/band_set" 96 | rockme_set: "{{images_dir}}/rockme_set" 97 | director_data: "{{data_dir}}/director" 98 | environments: "{{data_dir}}/environments" 99 | user_images: "{{images_dir}}/user" 100 | user_libraries: "{{libraries_dir}}/user" 101 | workspace: "{{data_dir}}/workspace" 102 | notebooks: "{{data_dir}}/notebooks" 103 | uploads: "{{data_dir}}/uploads" 104 | grafana_data: "{{home_dir}}/grafana" 105 | splash: "{{home_dir}}/splash" 106 | public: "{{data_dir}}/public" 107 | well_known: "{{home_dir}}/well_known" 108 | dashboard: "{{home_dir}}/dashboard" 109 | ovpnkeys: "{{home_dir}}/ovpn_keys" 110 | acmedns: "{{home_dir}}/acmedns" 111 | acmesh: "{{home_dir}}/acmesh" 112 | certs: "{{data_dir}}/certs" 113 | clickhouse: "/srv/clickhouse" 114 | clickhouse_tmp: "/srv/clickhouse_tmp" 115 | clickhouse_log: "/var/log/clickhouse-server" 116 | redis: "/srv/redis" 117 | create_dirs: 118 | - ["{{ images_dir }}", "{{ build_dir }}", "{{ data_dir }}", "{{ etc_dir }}", ] 119 | - ["{{ dirs.well_known }}","{{ dirs.certs }}", "{{ dirs.user_images }}"] 120 | - ["{{ dirs.workspace }}", "{{ dirs.public }}", "{{ dirs.grafana_data }}"] 121 | - ["{{ dirs.front_custom_config }}", "{{ dirs.chwriter_custom_config }}"] 122 | - ["{{ dirs.chwriter_custom_migrations }}", "{{ dirs.environments }}"] 123 | - ["{{ dirs.user_libraries }}"] 124 | in_dirs: 125 | etc: /usr/platform/etc 126 | # ssl files 127 | _ssl_cert_fullchain: fullchain.cer 128 | _ssl_cert: cert.cer 129 | _ssl_cert_key: priv.key 130 | # ########## ports ########## 131 | _ports: 132 | # services 133 | director: [10000, 
8080] 134 | front: [10001, 8080] 135 | front_ws: [10002, 8082] 136 | heavyload: [10010, 8080] 137 | httpdebug: [8089,8080] 138 | # tools 139 | logspout: 140 | http: [8085, 8080] 141 | udp: [5055, 8090] 142 | grafana: [3000, 3000] 143 | jupyter: [8888, 8080] 144 | theia: [8887, 8000] 145 | theia_dev: [8886, 8080] 146 | netdata: [19999, 19999] 147 | fakeshop: [8087, 8080] 148 | # servers 149 | http: [80] 150 | https: [443] 151 | letsencrypt: [8011, 80] 152 | openvpn: [8080, 1194] 153 | redis: [6379, 6379] 154 | s2svpn: [8079] 155 | clickhouse_tcp: [9000] 156 | clickhouse: [8123] 157 | chproxy: [9090, 9090] 158 | chronograf: [18888] 159 | influxhttp: [18086] 160 | influxtsb: [4242] 161 | statsd: [8125, 8125] 162 | ports: "{{_ports|combine(_ports_extra|default({}))}}" 163 | # Mem limits 164 | anaconda_mem_limit: 2g 165 | # ########## Docker ########## 166 | # user defined docker network 167 | docker_net_name: custom 168 | docker_interface: docker1 169 | docker_net_pattern: 172.16.25.* # will be converted to 172.16.25.0/24 170 | docker_users: 171 | - '{{support_user}}' 172 | # calculable 173 | docker_net: "{{docker_net_pattern|ip_p2n}}" 174 | docker_host_ip: "{{docker_net|net_gw}}" 175 | docker_netmask: "{{docker_net|ipaddr('netmask')}}" 176 | docker_net_net: "{{docker_net|ipaddr('network')}}" 177 | # default docker network 178 | docker_defnet_pattern: 172.17.* 179 | docker_defnet_net: "{{docker_defnet_pattern|ip_p2n}}" 180 | # other params 181 | docker_band_lbls: { inband: system } 182 | _label_band_managed: { role: managed } 183 | _restart_policy: unless-stopped 184 | # ########## Clients VPN ########## 185 | covpn_keys_dir: '{{home_dir}}/ovpn_keys' 186 | # ########## Server-server VPN ########## 187 | s2s_vpn_connect: false 188 | s2s_vpn_key: 'ovpn_keys/node{{vpn_id}}.ovpn' 189 | s2s_vpn_net_pattern: 192.168.222.* 190 | s2s_vpn_net: "{{s2s_vpn_net_pattern|ip_p2n}}" 191 | s2s_vpn_host_ip: "{{s2s_vpn_net|net_gw}}" 192 | # ########## Interfaces ########## 193 | if_outer: "{{ansible_default_ipv4.address}}" 194 | if_inner: "{{docker_host_ip}}" 195 | local_nets: [ "127.0.0.0/8", "{{s2s_vpn_net}}", "{{docker_net}}", "{{docker_defnet_net}}" ] 196 | etc_hosts: # also used in docker containers 197 | host: "{{if_inner}}" 198 | # ########## Firewall ########## 199 | disable_ipv6: false 200 | firewall_rules: 201 | - { from_ip: '{{docker_net}}'} 202 | - { to_port: '{{ports.http.0}}'} 203 | - { to_port: '{{ports.https.0}}'} 204 | - { to_port: '{{ports.openvpn.0}}'} 205 | # ########## Jupyter/anaconda packages ########## 206 | # install libs to python containers: anaconda and theia 207 | _dev_py_libs: ["msgpack", "ujson", "prodict", "arrow", "simplech", "asimplech"] 208 | dev_py_libs: "{{_dev_env_py_libs|combine(_dev_py_libs_extra|default({})) }}" 209 | # ########## ClickHouse ########## 210 | _clickhouse_profiles_custom: 211 | sys_prof: { max_memory_usage: 3000000000, use_uncompressed_cache: 0, load_balancing: random } 212 | user_prof: { max_memory_usage: 2000000000, use_uncompressed_cache: 0, load_balancing: random } 213 | _clickhouse_users_custom: 214 | - { name: "{{rsys_creds.0}}", password: "{{rsys_creds.1}}", networks: "{{ local_nets }}", profile: "sys_prof", quota: "default" } 215 | - { name: "{{ruser_creds.0}}", password: "{{ruser_creds.1}}", networks: "{{ local_nets }}", profile: "user_prof", quota: "default" } 216 | _clickhouse_logger: 217 | level: trace 218 | log: "{{ dirs.clickhouse_log }}/clickhouse-server.log" 219 | errorlog: "{{ dirs.clickhouse_log }}/clickhouse-server.err.log" 220 | size: 
50M 221 | count: 7 222 | ch_db: stats 223 | # ########## CH PROXY ########## 224 | _chproxy_defs: 225 | max_concurrent_queries: 3 226 | allow_cors: yes 227 | max_queue_time: 30s 228 | max_execution_time: 30s 229 | max_queue_size: 100 230 | to_cluster: local 231 | _chproxy_allowed_networks_default: "{{local_nets}}" 232 | _chproxy_clusters_default: 233 | - name: local 234 | nodes: [ "{{if_inner}}:{{ports.clickhouse.0}}" ] 235 | users: 236 | - { name: readonly, password: "" } 237 | - { name: default, password: "" } 238 | _chproxy_users_default: 239 | - { to_user: 'readonly', name: 'readonly', password: 'readonly'} 240 | - { to_user: 'default', name: 'default', password: 'default'} 241 | # ########## CH Common ########## 242 | _chp_dsn: "http://{{main_user}}:{{main_user_password}}@host:{{ports.chproxy.0}}/{{ch_db}}" 243 | _chs_dsn: "http://{{main_user}}@host:{{ports.clickhouse.0}}/{{ch_db}}" 244 | # ########## Services env ########## 245 | _containers_env: 246 | director: 247 | #ENCODED_ENV will be set in playbook 248 | PORT: "{{ports.director.1}}" 249 | IMAGES_PATH: "/images" 250 | INITIAL_STARTUP: "{{(_initial_startup + _initial_startup_extra|default([]))|to_json}}" 251 | BIND_HOST_ADDR: "{{ if_inner }}" 252 | BAND_PY_IMAGE: "{{_images.band}}" 253 | theia: 254 | PORT_THEIA: "{{ports.theia.1}}" 255 | PORT_DEV: "{{ports.theia_dev.1}}" 256 | heavyload: 257 | WEBHOOK: "http://front:{{ports.front.1}}/upload/{{ '{{.service}}' }}/{{ '{{.name}}' }}" 258 | # for user overrides 259 | _containers_env_extra: {} 260 | # ########## Common Envs ########## 261 | _common_env: 262 | JSON_LOGS: "1" 263 | LOG_LEVEL: "info" 264 | DOMAIN: "{{_domain}}" 265 | HOST_ADDR: "{{if_inner}}" 266 | STATSD_HOST: "{{if_inner}}" 267 | NETWORK: "{{docker_net_name}}" 268 | REDIS_DSN: "redis://redis:6379" 269 | FULLNAME: "{{_fullname}}" 270 | EMAIL: "{{_email}}" 271 | UPLOADS_DIR: "{{dirs.uploads}}" 272 | CH_DSN: "{{_chp_dsn}}" 273 | CHP_DSN: "{{_chp_dsn}}" 274 | CHS_DSN: "{{_chs_dsn}}" 275 | # for user overrides `_common_env_extra` 276 | common_env: "{{_common_env|combine(_common_env_extra|default({}))}}" 277 | container_env: "{{ {}|combine( common_env, _containers_env[service]|default({}), _containers_env_extra[service]|default({}) ) }}" 278 | 279 | # ########## NGINX COMMONS ########## 280 | # common blocks 281 | _nginx_listen: 282 | - "listen 443 ssl http2" 283 | _nginx_ssl: 284 | - ssl_certificate_key {{dirs.certs}}/{{_domain}}/{{_ssl_cert_key}} 285 | - ssl_certificate {{dirs.certs}}/{{_domain}}/{{_ssl_cert_fullchain}} 286 | - ssl_trusted_certificate {{dirs.certs}}/{{_domain}}/{{_ssl_cert}} 287 | _nginx_nocache: 288 | - add_header 'Cache-Control' 'no-store; must-revalidate' 289 | - expires off 290 | _nginx_deny_dots: 291 | - location ~ /\. 
{ deny all; return 404; } 292 | _nginx_auth: 293 | - 'auth_basic "Restricted"' 294 | - "auth_basic_user_file auth_basic/common" 295 | _nginx_cors_loc_common: | 296 | add_header 'Access-Control-Allow-Origin' '*'; 297 | add_header 'Access-Control-Allow-Headers' 'Cache-Control'; 298 | _nginx_cors_loc: | 299 | if ($request_method = 'OPTIONS') { 300 | {{ _nginx_cors_loc_common }} 301 | add_header 'Access-Control-Max-Age' 1728000; 302 | add_header 'Content-Type' 'text/plain; charset=utf-8'; 303 | add_header 'Content-Length' 0; 304 | return 204; 305 | } 306 | if ($request_method = 'POST') { {{ _nginx_cors_loc_common }} add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range'; } 307 | if ($request_method = 'GET') { {{ _nginx_cors_loc_common }} add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range'; } 308 | _nginx_base_vh: "{{ _nginx_listen + _nginx_ssl }}" 309 | # specific configurations 310 | _nginx_proxy_vh: 311 | - server_name {sd}.{{_domain}} 312 | - location / { {{_nginx_cors_loc }} proxy_pass http://u{sd}; } 313 | - "{{_nginx_base_vh}}" 314 | _nginx_private_proxy_vh: 315 | - "{{_nginx_auth}}" 316 | - "{{_nginx_proxy_vh}}" 317 | - client_max_body_size 100M 318 | _nginx_defvh_http: 319 | - server_name _http 320 | - listen 80 default_server 321 | - location /.well-known/ { auth_basic off; proxy_pass http://uletsencrypt; } 322 | - location /stub_status { stub_status; allow 127.0.0.1; deny all; access_log off;} 323 | - location / { return 301 https://$host$request_uri; } 324 | _nginx_defvh_https: 325 | - "server_name _https" 326 | - "{{_nginx_listen.0}} default_server" 327 | - "{{_nginx_ssl}}" 328 | - "root {{dirs.splash}}" 329 | - "location / { try_files $uri /index.html; }" 330 | _nginx_upload_loc: 331 | - location /upload { 332 | client_max_body_size 500m; 333 | client_body_buffer_size 128K; 334 | limit_except POST { deny all; } 335 | proxy_pass http://uheavyload; } 336 | _nginx_front: 337 | - server_name {{_domain}} 338 | - "{{_nginx_base_vh}}" 339 | - "{{_nginx_deny_dots}}" 340 | - "root {{dirs.splash}}" 341 | - location / { try_files $uri $uri/index.html @upstream; } 342 | - location /public { {{_nginx_cors_loc}} alias {{ dirs.public }}; } 343 | - location /wss { proxy_pass http://ufront_ws; } 344 | - "{{_nginx_upload_loc}}" 345 | - "{{_nginx_front_extra|default([])}}" 346 | - location @upstream { proxy_pass http://ufront; } 347 | _nginx_dashboard: 348 | - server_name app.{{_domain}} 349 | - "{{_nginx_base_vh}}" 350 | - "{{_nginx_auth}}" 351 | - "root {{dirs.dashboard}}/dist" 352 | - location /api/ { proxy_pass http://udirector/; } 353 | - location /ws { proxy_pass http://udirector; } 354 | - location / { try_files $uri /index.html; } 355 | _nginx_grafana: 356 | - "{{_nginx_private_proxy_vh}}" 357 | - location /public/ { auth_basic off; proxy_pass http://u{sd}; } 358 | - proxy_set_header X-WEBAUTH-USER $remote_user 359 | - proxy_hide_header Authorization 360 | _nginx_netdata: 361 | - access_log off 362 | - "{{_nginx_private_proxy_vh|flatten|map('replace','{sd}','netdata')|list}}" 363 | _nginx_sites: 364 | def_https: "{{_nginx_defvh_https|flatten }}" 365 | def_http: "{{_nginx_defvh_http|flatten }}" 366 | front: "{{_nginx_front|flatten }}" 367 | dashboard: "{{_nginx_dashboard|flatten }}" 368 | grafana: "{{_nginx_grafana|flatten|map('replace','{sd}','grafana')|list}}" 369 | theia: "{{_nginx_private_proxy_vh|flatten|map('replace','{sd}','theia')|list}}" 370 | demo: "{{_nginx_private_proxy_vh|flatten|map('replace','{sd}','demo')|list}}" 371 | jupyter: 
"{{_nginx_private_proxy_vh|flatten|map('replace','{sd}','jupyter')|list}}" 372 | netdata: "{{_nginx_netdata|flatten}}" 373 | _nginx_upstreams: 374 | - 'upstream ufront { server {{if_inner}}:{{ports.front.0}}; }' 375 | - 'upstream ufront_ws { server {{if_inner}}:{{ports.front_ws.0}}; }' 376 | - 'upstream udirector { server {{if_inner}}:{{ports.director.0}}; }' 377 | - 'upstream uheavyload { server {{if_inner}}:{{ports.heavyload.0}}; }' 378 | - 'upstream ugrafana { server {{if_inner}}:{{ports.grafana.0}}; }' 379 | - 'upstream ujupyter { server {{if_inner}}:{{ports.jupyter.0}}; }' 380 | - 'upstream unetdata { server {{if_inner}}:{{ports.netdata.0}}; }' 381 | - 'upstream utheia { server {{if_inner}}:{{ports.theia.0}}; }' 382 | - 'upstream uletsencrypt { server {{if_inner}}:{{ports.letsencrypt.0}}; }' 383 | - 'upstream udemo { server {{if_inner}}:{{ports.fakeshop.0}}; }' 384 | 385 | # ########## Other things ########## 386 | hostname: "{{inventory_hostname_short|default(inventory_hostname)}}" 387 | # ########## Mailer config ########## 388 | mailer_subj_key: "Ваши ключи OpenVPN ({{ _domain }}) v{{ playbook_version }}" 389 | mailer_subj_snip: "Сниппет для сайта ({{ _domain }}) v{{ playbook_version }}" 390 | -------------------------------------------------------------------------------- /group_vars/wildcard.yml: -------------------------------------------------------------------------------- 1 | _ssl_wildcard: yes 2 | -------------------------------------------------------------------------------- /install_roles.yml: -------------------------------------------------------------------------------- 1 | # install_roles.yml 2 | - src: https://github.com/madiedinro/ansible-server-role 3 | version: master 4 | name: dr.server 5 | 6 | - src: https://github.com/madiedinro/ansible-exposeur-role 7 | version: master 8 | name: dr.exposeur 9 | 10 | - src: https://github.com/madiedinro/ansible-netdata-role 11 | version: master 12 | name: dr.netdata 13 | 14 | - src: https://github.com/madiedinro/ansible-docker-role 15 | version: master 16 | name: dr.docker 17 | 18 | - src: https://github.com/madiedinro/ansible-docker-container-role 19 | version: master 20 | name: dr.docker-container 21 | 22 | - src: https://github.com/madiedinro/ansible-postgres-container-role 23 | version: master 24 | name: dr.postgres-container 25 | 26 | - src: https://github.com/madiedinro/ansible-chronograf-role 27 | version: master 28 | name: dr.chronograf 29 | 30 | - src: https://github.com/AlexeySetevoi/ansible-clickhouse 31 | version: master 32 | name: AlexeySetevoi.clickhouse 33 | 34 | - src: https://github.com/jdauphant/ansible-role-nginx 35 | version: master 36 | name: jdauphant.nginx 37 | 38 | - src: https://github.com/madiedinro/ansible-influxdb-role.git 39 | version: master 40 | name: dr.influxdb 41 | 42 | - src: https://github.com/madiedinro/ansible-letsencrypt-wildcatd-dns-auto.git 43 | version: master 44 | name: dr.letsencrypt.wildcard.auto 45 | 46 | - src: https://github.com/madiedinro/ansible-openvpn-client-role 47 | version: master 48 | name: dr.openvpn-client 49 | 50 | - src: https://github.com/madiedinro/ansible-openvpn-server-role 51 | version: master 52 | name: dr.openvpn-server 53 | 54 | - src: https://github.com/madiedinro/ansible-static-service-role 55 | version: master 56 | name: dr.static-service 57 | -------------------------------------------------------------------------------- /inventory/.keep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rockstat/bootstrap/4e974e3ad79dc337a940fac13536d9cd7a0bffa7/inventory/.keep -------------------------------------------------------------------------------- /inventory/all: -------------------------------------------------------------------------------- 1 | ### 2 | # This is a template for your personal inventory file 3 | # Make a copy named "private", or any other name starting with "priv" 4 | # host parameters: 5 | # ansible_host: host.ru # required / domain or IP of the server 6 | # contact_email: you@example.com # optional / email to receive a notification when the certificate is about to expire 7 | # 8 | # Replace "youstat" with a short name of your server. It will be used as the internal hostname 9 | 10 | [all:vars] 11 | ansible_python_interpreter=/usr/bin/python3 12 | 13 | #[private] 14 | #youstat ansible_host=alco.yourdomain.com contact_email= 15 | 16 | #[rockstat] 17 | #youstat 18 | -------------------------------------------------------------------------------- /inventory/template-private.yml: -------------------------------------------------------------------------------- 1 | # Template for YAML hosts configuration 2 | # all: 3 | # hosts: 4 | # somestat: 5 | # tracker_domain: alco.yourdomain.com 6 | # vars: 7 | # contact_email: youemail@example.com 8 | # children: 9 | # rockstat: 10 | # somestat: {} 11 | -------------------------------------------------------------------------------- /keys/support.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXbFYLDz0aJzdbHhHsMPCq3AowMBiEh1arLSFv329t+ENyoFav43nN45HSKTCX3uCa2s3nXX2wvLcQ5IcZQQY9wldeuHpiLV5slBN7x2JoA1zDx4a3DJdq9N2hBw4ETiJ+IyUBQPCJ1++zu+HGiWGT/PDHP1c3DzQkUJ4onMZvPR+P9dwRX8W8mZzL5jTV4DOhgpXGm6wIcmvSswWhqx2QjtMzL3KU/mPbgAv2xdUzAVsDw+szvTILyP2pp5Io21qOURZkso0/LAm3Uqo229Nv1hhVKSayjWDf7bAPE6Qk0xHgC42EFK5dLjlhqgBp3msQNgOK9cCCiUJ0W+Fjax4t hello@alcolytics.ru 2 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | BR := $(shell git branch | grep \* | cut -d ' ' -f2-) 2 | bump-patch: 3 | bumpversion patch 4 | 5 | password: 6 | openssl passwd -apr1 7 | 8 | bump-minor: 9 | bumpversion minor 10 | 11 | demo: 12 | docker run -it --rm -v `pwd`:/playbook:ro ubuntu:16.04 bash 13 | 14 | up_master: 15 | @echo "on branch $(BR)" 16 | 17 | @[ "$(BR)" == "dev" ] && true || (echo "only dev can be used. you are on $(BR)" && exit 1) 18 | @[ -z "$(git status --porcelain)" ] && true || (echo "directory not clean.
commit changes first" && exit 1) 19 | @git checkout master && git merge dev && git push origin master && git checkout dev \ 20 | && echo "master merged and pushed" 21 | 22 | to_master: 23 | @echo $(BR) 24 | git checkout master && git merge $(BR) && git checkout $(BR) 25 | 26 | push: 27 | git push origin master 28 | git push origin dev 29 | 30 | hz_servers: 31 | @echo "quering servers list" 32 | @source .env && curl -s \ 33 | -H "Content-Type: application/json" \ 34 | -H "Authorization: Bearer $${HETZNER_API_KEY}" https://api.hetzner.cloud/v1/servers \ 35 | | jq '.servers | .[] | {id: .id, name: .name, status: .status, image: .image.name}' 36 | 37 | hz_test_rebuild: 38 | @source .env && curl -s -H "Content-Type: application/json" -H "Authorization: Bearer $${HETZNER_API_KEY}" \ 39 | -d '{"image": "ubuntu-16.04"}' \ 40 | -X POST "https://api.hetzner.cloud/v1/servers/$${HETZNER_TEST_SRV}/actions/rebuild" | jq 41 | 42 | hz_stage_rebuild: 43 | source .env && curl -s -H "Content-Type: application/json" -H "Authorization: Bearer $${HETZNER_API_KEY}" \ 44 | -d '{"image": "ubuntu-16.04"}' \ 45 | -X POST "https://api.hetzner.cloud/v1/servers/$${HETZNET_STAGE_SRV}/actions/rebuild" | jq 46 | sleep 60 47 | 48 | 49 | 50 | playbook_stage_init: 51 | ansible-playbook os_init.yml --limit=stage -e wait_server=1 52 | 53 | playbook_stage_full: 54 | ansible-playbook platform.yml --limit=stage --tags=full 55 | 56 | playbook_stage_platform: 57 | ansible-playbook platform.yml --limit=stage --tags=platform -e branch=dev 58 | 59 | 60 | 61 | stage_rebuild: hz_stage_rebuild playbook_stage_init playbook_stage_full 62 | 63 | 64 | playbook_test_init: 65 | ansible-playbook os_init.yml --limit=test -e wait_server=1 66 | 67 | playbook_test_full: 68 | ansible-playbook platform.yml --limit=test --tags=full -e branch=dev 69 | 70 | playbook_test_platform: 71 | ansible-playbook platform.yml --limit=test --tags=platform -e branch=dev 72 | 73 | test_rebuild: hz_test_rebuild playbook_test_init playbook_test_full 74 | -------------------------------------------------------------------------------- /os_init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Preparing system for Ansible 3 | hosts: all 4 | gather_facts: false 5 | become: yes 6 | vars: 7 | packages: python3 python3-dev python3-pip python3-netaddr python3-setuptools python3-requests 8 | pre_tasks: 9 | 10 | - name: Waiting server 11 | wait_for_connection: 12 | delay: 15 13 | timeout: 300 14 | when: "wait_server is defined and wait_server|bool == True" 15 | 16 | 17 | - name: Wait apt ready 18 | raw: bash -c "while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 1; done;" 19 | changed_when: false 20 | 21 | - name: Install python for Ansible 22 | register: output 23 | raw: bash -c "test -e /usr/bin/python || (apt -qqy update && apt install -qqy {{packages}})" 24 | changed_when: output.stdout != "" 25 | 26 | - name: Copy key 27 | raw: 'mkdir -p ~/.ssh && chmod 700 ~/.ssh && grep -qF "{{support_key}}" {{auth_keys_f}} || echo "{{support_key}}" >> {{auth_keys_f}}' 28 | vars: 29 | auth_keys_f: ~/.ssh/authorized_keys 30 | -------------------------------------------------------------------------------- /os_upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: System upgrade 3 | hosts: all 4 | serial: 1 5 | become: yes 6 | tasks: 7 | 8 | - name: Updating tree and install aptitude 9 | apt: 10 | name: aptitude 11 | update_cache: yes 12 | 13 | - name: Upgrading 14 | 
apt: 15 | upgrade: dist 16 | 17 | - name: reboot the server 18 | shell: sleep 2 && shutdown -r now 19 | async: 1 20 | poll: 0 21 | 22 | - name: Wait for server come back 23 | wait_for_connection: 24 | delay: 15 25 | timeout: 300 26 | 27 | - debug: 28 | msg: "We're back again. Running on {{ inventory_hostname }}" 29 | -------------------------------------------------------------------------------- /platform.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Installing platform 3 | hosts: rockstat 4 | become: yes 5 | tasks: 6 | 7 | - debug: 8 | msg: 9 | email: "{{_email}}" 10 | realname: "{{_fullname}}" 11 | domain: "{{_domain}}" 12 | admin_password: "{{_admin_password}}" 13 | enable_support: "{{_enable_support}}" 14 | ssl_wildcard: "{{_ssl_wildcard}}" 15 | tags: ['never'] 16 | 17 | 18 | #### ##### ##### ##### ##### Check is python 3 ##### ##### ##### ##### ##### 19 | 20 | - name: Checking python version is 3 21 | assert: 22 | that: 23 | - "ansible_python_interpreter == '/usr/bin/python3'" 24 | msg: "Required python 3. Details: https://docs.ansible.com/ansible/latest/reference_appendices/python_3_support.html" 25 | tags: ["always"] 26 | 27 | - name: Build and set permissions for required dirs 28 | block: 29 | - name: Creating directories 30 | command: "mkdir -p {{ block_dirs }}" 31 | args: 32 | warn: False 33 | 34 | - name: Setting permissions 35 | command: "chown {{_uid}}:{{_gid}} {{ block_dirs }}" 36 | args: 37 | warn: False 38 | vars: 39 | block_dirs: "{{ create_dirs|flatten|join(' ') }}" 40 | tags: ["never", "full", "platform", "dirs"] 41 | 42 | 43 | ##### ##### ##### ##### ##### Server configuration ##### ##### ##### ##### ##### 44 | 45 | - block: 46 | - name: BaseServer role execution 47 | include_role: 48 | name: dr.server 49 | vars: 50 | drs_setup_user: yes 51 | drs_user: '{{support_user}}' 52 | drs_home_dir: "{{home_dir}}" 53 | drs_pub_key_file: '{{support_key_file}}' 54 | drs_disable_ipv6: "{{disable_ipv6}}" 55 | drs_extra_hosts: "{{etc_hosts}}" 56 | drs_uid: "{{_gid}}" 57 | drs_gid: "{{_uid}}" 58 | r_authorized_key_remove: "{{ not _enable_support }}" 59 | when: 'setup_server == True' 60 | tags: ['never', 'os', 'network', 'system', 'full'] 61 | 62 | ##### ##### ##### ##### ##### Check DNS configuration ##### ##### ##### ##### ##### 63 | # Should be after server configuration role that ensure dns utils is installed 64 | 65 | - block: 66 | - include_tasks: tasks/check_dns.yml 67 | vars: 68 | check_domains: "{{_domains + [_domain]}}" 69 | tags: ['never', 'ssl', 'full'] 70 | 71 | 72 | 73 | 74 | ##### ##### ##### ##### ##### Docker server ##### ##### ##### ##### ##### 75 | 76 | - block: 77 | - name: Docker role execution 78 | include_role: 79 | name: dr.docker 80 | vars: 81 | drd_users: 82 | - "{{support_user}}" 83 | drd_create_network: yes 84 | drd_version: edge 85 | drd_net_name: '{{docker_net_name}}' 86 | drd_bind_ip: "{{docker_host_ip}}" 87 | drd_interface: '{{docker_interface}}' 88 | drd_net: '{{docker_net}}' 89 | drd_mtu: '1400' 90 | tags: ['never', 'system', 'docker', 'full'] 91 | 92 | 93 | ##### ##### ##### ##### ##### Dashboard ##### ##### ##### ##### ##### 94 | 95 | - name: Cloning Dashboard content 96 | git: 97 | repo: '{{repos.dashboard}}' 98 | version: HEAD 99 | force: yes 100 | accept_hostkey: yes 101 | dest: '{{dirs.dashboard}}' 102 | tags: ['dashboard', 'platform', 'ppart', 'band', 'static', 'full'] 103 | 104 | 105 | ##### ##### ##### ##### ##### Splash screen ##### ##### ##### ##### ##### 106 | 107 | - block: 108 | 
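    # A minimal sketch of the tag-gated pattern used by each section of this playbook;
    # the role, switch and tag names below are illustrative, not an actual section:
    #
    #   - block:
    #       - name: Some service setup
    #         include_role:
    #           name: some.role
    #     when: 'setup_something == True'
    #     tags: ['never', 'something', 'full']
    #
    # Because of the special `never` tag, such a block is skipped unless one of its
    # other tags is requested explicitly, e.g. `ansible-playbook platform.yml
    # --limit=stage --tags=full` as in the makefile targets above.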
- name: Cloning Splash content 109 | git: 110 | repo: "{{repos.splash}}" 111 | dest: "{{dirs.splash}}" 112 | accept_hostkey: yes 113 | force: yes 114 | 115 | - template: 116 | src: google-webmaster.j2 117 | dest: "{{dirs.splash}}/{{_google_webmaster}}" 118 | when: "_google_webmaster is defined and _google_webmaster != ''" 119 | 120 | tags: ['splash', 'platform', 'ppart', 'band', 'static', 'full'] 121 | 122 | 123 | ##### ##### ##### ##### ##### Exposeur (UFW, iptables) ##### ##### ##### ##### ##### 124 | 125 | - block: 126 | - name: Exposeur role execution 127 | include_role: 128 | name: dr.exposeur 129 | vars: 130 | expo_reset_ufw: true 131 | expo_rules: '{{firewall_rules|flatten + host_firewall_rules|default([]) + group_firewall_rules|default([])}}' 132 | expo_expose_rules: '{{expose_rules|default([]) + host_expose_rules|default([]) + group_expose_rules|default([])}}' 133 | tags: ['never', 'firewall', 'network', 'system', 'full'] 134 | 135 | ##### ##### ##### ##### ##### Lets encrypt - acme.sh ##### ##### ##### ##### ##### 136 | 137 | - block: 138 | 139 | - name: LetsEncrypt role execution 140 | include_role: 141 | name: dr.letsencrypt.wildcard.auto 142 | vars: 143 | _r_domain: "{{_domain}}" 144 | _r_subdomains: "{{_subdomains}}" 145 | _r_webroot: "{{dirs.well_known}}" 146 | _r_force: "{{ssl_force|default(False)|bool}}" 147 | _r_wildcard: "{{_ssl_wildcard}}" 148 | _r_docker_networks: [ { name: "{{docker_net_name}}" } ] 149 | _r_docker_labels: "{{ {}|combine(labels, docker_band_lbls) }}" 150 | _r_check_addr: "{{if_inner}}:{{ports.http.0}}" 151 | _r_bind_addr: "{{if_inner}}:{{ports.letsencrypt.0}}" 152 | _r_cert_root: "{{dirs.certs}}" 153 | _r_dns_provider: "{{_ssl_dns_provider|default('dns_acmedns')}}" 154 | _r_dns_envs: "{{_ssl_dns_envs|default({})}}" 155 | _r_debug: yes 156 | _r_log: no 157 | 158 | - shell: nginx -t && nginx -s reload || /bin/true 159 | 160 | vars: 161 | oldloc: "/etc/letsencrypt/live/{{_domain}}" 162 | labels: 163 | band.service.title: "SSL Renewer" 164 | band.service.def_position: "0x6" 165 | 166 | 167 | when: "_setup_ssl == True" 168 | tags: ['never', 'full', 'system', 'ssl'] 169 | 170 | 171 | ##### ##### ##### ##### ##### Nginx ##### ##### ##### ##### ##### 172 | 173 | - block: 174 | - include_vars: vars/nginx.yml 175 | - name: Nginx role execution 176 | include_role: 177 | name: jdauphant.nginx 178 | vars: 179 | nginx_official_repo: no 180 | keep_only_specified: yes 181 | nginx_http_params: '{{_nginx_http_params}}' 182 | nginx_sites: "{{_nginx_sites|combine(_nginx_sites_extra|default({}))}}" 183 | nginx_auth_basic_files: 184 | common: "{{ common_users|flatten }}" 185 | nginx_configs: 186 | upgrade: '{{_nginx_proto_upgrade}}' 187 | gzip: '{{_nginx_gzip_params}}' 188 | proxy: '{{_nginx_proxy_params + _nginx_proxy_params_extra|default([])}}' 189 | upstream: '{{_nginx_upstreams + _nginx_upstreams_extra|default([]) }}' 190 | ssl: '{{_nginx_ssl_params}}' 191 | vars: 192 | # local_config: "{{ansible_local.config|default({})}}" 193 | # local_general: "{{local_config.general|default({})}}" 194 | common_users: 195 | - "admin:{{_admin_password}}" 196 | # - "{{local_general.users}}" 197 | when: "setup_nginx == True" 198 | tags: ['never', 'nginx', 'httpd', 'system', 'full'] 199 | 200 | 201 | ##### ##### ##### ##### ##### Netdata ##### ##### ##### ##### ##### 202 | 203 | - block: 204 | 205 | 206 | - name: Netdata container 207 | docker_container: 208 | name: netdata 209 | hostname: netdata 210 | image: "{{images.netdata}}" 211 | labels: "{{docker_band_lbls}}" 212 | ports: 213 | - 
"{{if_inner}}:{{ports.netdata.0}}:{{ports.netdata.1}}" 214 | - "{{if_inner}}:{{ports.statsd.0}}:{{ports.statsd.1}}/udp" 215 | networks: [ { name: "{{docker_net_name}}" } ] 216 | etc_hosts: "{{etc_hosts}}" 217 | restart_policy: "{{_restart_policy}}" 218 | pull: yes 219 | env: "{{container_env}}" 220 | restart: yes 221 | # recreate: yes 222 | memory: "200m" 223 | capabilities: 224 | - SYS_PTRACE 225 | security_opts: 226 | - apparmor:unconfined 227 | volumes: 228 | - /proc:/host/proc:ro 229 | - /sys:/host/sys:ro 230 | - /var/run/docker.sock:/var/run/docker.sock 231 | 232 | - name: Netdata role execution 233 | include_role: 234 | name: dr.netdata 235 | vars: 236 | drn_action: remove 237 | drn_allow: 238 | dashboard_from: "*" 239 | badges_from: "*" 240 | conf_from: "*" 241 | connections_from: "*" 242 | streaming_from: "*" 243 | drn_stream: '{{netdata_stream_config|default({})}}' 244 | drn_backend: '{{netdata_backend_config|default({})}}' 245 | drn_bind_to: "{{if_inner}}" 246 | vars: 247 | container_env: 248 | PGID: "999" 249 | tags: ['never', 'netdata', 'system', 'full'] 250 | 251 | ##### ##### ##### ##### ##### Clickhouse ##### ##### ##### ##### ##### 252 | 253 | - block: 254 | - name: ClickHouse role execution 255 | include_role: 256 | name: AlexeySetevoi.clickhouse 257 | vars: 258 | clickhouse_listen_host_default: ["127.0.0.1", "::1", "{{if_inner}}"] 259 | clickhouse_networks_default: ["127.0.0.1", "::1", "{{docker_net}}"] 260 | clickhouse_profiles_custom: "{{_clickhouse_profiles_custom}}" 261 | clickhouse_users_custom: "{{_clickhouse_users_custom}}" 262 | clickhouse_logger: "{{_clickhouse_logger}}" 263 | clickhouse_dbs_custom: [ {name: '{{ch_db}}'} ] 264 | clickhouse_path_data: "{{dirs.clickhouse}}" 265 | clickhouse_path_tmp: "{{dirs.clickhouse_tmp}}" 266 | tags: ['never', 'clickhouse', 'clickhouse-server', 'system', 'full'] 267 | 268 | ##### ##### ##### ##### ##### Redis server ##### ##### ##### ##### ##### 269 | 270 | - name: Redis setup 271 | block: 272 | - name: Starting container for service Redis 273 | docker_container: 274 | name: redis 275 | hostname: redis 276 | image: "{{images.redis}}" 277 | # labels: "{{docker_band_lbls}}" 278 | networks: [ { name: "{{docker_net_name}}" } ] 279 | etc_hosts: "{{etc_hosts}}" 280 | restart_policy: "{{_restart_policy}}" 281 | restart: yes 282 | pull: yes 283 | recreate: yes 284 | memory: "300m" 285 | ports: ["{{if_inner}}:{{ports.redis.0}}:{{ports.redis.1}}"] 286 | volumes: ["{{dirs.redis}}:/data"] 287 | tags: ['never', 'redis', 'system', 'docker-container', 'full'] 288 | 289 | ##### ##### ##### ##### ##### Band Director ##### ##### ##### ##### ##### 290 | 291 | - block: 292 | 293 | - name: Pulling latest Band framework base image from docker hub 294 | docker_image: 295 | name: "{{images.band}}" 296 | pull: yes 297 | force: yes 298 | 299 | - name: Pulling BandSet services collection content from git 300 | git: 301 | repo: "{{repos.band_set}}" 302 | dest: "{{dirs.band_set}}" 303 | version: "{{branch}}" 304 | accept_hostkey: yes 305 | force: yes 306 | 307 | - name: Starting container for service Director 308 | docker_container: 309 | name: director 310 | hostname: director 311 | image: "{{images.director}}" 312 | labels: "{{docker_band_lbls}}" 313 | ports: [ "{{if_inner}}:{{ports.director.0}}:{{ports.director.1}}" ] 314 | networks: [ { name: "{{docker_net_name}}" } ] 315 | etc_hosts: "{{etc_hosts}}" 316 | env: "{{ container_env|combine({'ENCODED_ENV': container_env|to_json }) }}" 317 | restart_policy: "{{_restart_policy}}" 318 | pull: yes 319 | 
restart: yes 320 | recreate: yes 321 | memory: "300m" 322 | volumes: 323 | - "{{dirs.band_set}}:/images/band_set" 324 | - "{{dirs.rockme_set}}:/images/rockme_set" 325 | - "{{dirs.user_images}}:/images/user" 326 | - "{{dirs.director_data}}:/data" # containers configs 327 | - "{{dirs.environments}}:/data/environments" 328 | - "{{dirs.etc_dir}}:/srv/platform/etc" 329 | - "/var/run/docker.sock:/var/run/docker.sock" 330 | vars: 331 | service: director 332 | tags: ['band', 'platform', 'ppart', 'director', 'docker-container', 'full'] 333 | 334 | ##### ##### ##### ##### ##### Front ##### ##### ##### ##### ##### 335 | 336 | - block: 337 | - docker_container: name=frontier state=absent 338 | 339 | - name: Starting container for service Front 340 | docker_container: 341 | name: front 342 | hostname: front 343 | image: "{{ images.front }}" 344 | labels: "{{ docker_band_lbls }}" 345 | ports: 346 | - "{{if_inner}}:{{ports.front.0}}:{{ports.front.1}}" 347 | - "{{if_inner}}:{{ports.front_ws.0}}:{{ports.front_ws.1}}" 348 | networks: [ { name: "{{docker_net_name}}" } ] 349 | etc_hosts: "{{etc_hosts}}" 350 | env: "{{container_env}}" 351 | restart_policy: "{{_restart_policy}}" 352 | pull: yes 353 | restart: yes 354 | recreate: yes 355 | memory: "300m" 356 | volumes: 357 | - "{{dirs.front_custom_config}}:/app/config/custom" 358 | 359 | vars: 360 | service: "front" 361 | tags: ['front', 'platform', 'ppart', 'rockme', 'docker-container', 'full'] 362 | 363 | 364 | ##### ##### ##### ##### ##### ClickHouse migrations ##### ##### ##### ##### ##### 365 | # - name: Including ClickHouse maintain role 366 | # block: 367 | # - import_tasks: tasks/ch_migrate.yml 368 | # vars: 369 | # operate_db: "{{ch_db}}" 370 | # operate_host: "127.0.0.1" 371 | # migrations_path: clickhouse_migrations 372 | # tags: ['never', 'chmigrate'] 373 | 374 | ##### ##### ##### ##### ##### Clickhouse Proxy ##### ##### ##### ##### ##### 375 | 376 | - block: 377 | 378 | - name: Rendering CHProxy config 379 | template: 380 | src: 'chproxy/config.yml.j2' 381 | dest: '{{chproxy_config}}' 382 | 383 | - name: Starting container for service Chproxy 384 | docker_container: 385 | name: chproxy 386 | hostname: chproxy 387 | image: "{{images.chproxy}}" 388 | labels: "{{docker_band_lbls}}" 389 | networks: [ { name: "{{docker_net_name}}" } ] 390 | etc_hosts: "{{etc_hosts}}" 391 | restart_policy: "{{_restart_policy}}" 392 | restart: yes 393 | pull: yes 394 | recreate: yes 395 | memory: "200m" 396 | ports: ["{{if_inner}}:{{ports.chproxy.0}}:{{ports.chproxy.1}}"] 397 | volumes: ["{{chproxy_config}}:/config.yml:ro"] 398 | vars: 399 | chproxy_config: "{{etc_dir}}/chproxy.yml" 400 | chproxy_defs: "{{_chproxy_defs}}" 401 | chproxy_allowed_networks: "{{_chproxy_allowed_networks_default + _chproxy_allowed_networks|default([])}}" 402 | chproxy_clusters: "{{_chproxy_clusters_default + _chproxy_clusters|default([])}}" 403 | chproxy_users: "{{_chproxy_users_default + _chproxy_users|default([])}}" 404 | tags: ['never', 'clickhouse-proxy', 'chproxy', 'platform', 'pservice', 'docker-container', 'full'] 405 | 406 | 407 | ##### ##### ##### ##### ##### ClickHouse Writer ##### ##### ##### ##### ##### 408 | 409 | - block: 410 | - name: Starting container for service ChWriter 411 | docker_container: 412 | name: chwriter 413 | hostname: chwriter 414 | image: "{{images.chwriter}}" 415 | labels: "{{docker_band_lbls}}" 416 | networks: [ { name: "{{docker_net_name}}" } ] 417 | etc_hosts: "{{etc_hosts}}" 418 | env: "{{container_env}}" 419 | restart_policy: "{{_restart_policy}}" 420 | 
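        # Note on the volumes below: chwriter's custom config and migrations are plain
        # host directories under data_dir (see `dirs` in group_vars/all.yml); the same
        # directories are mounted into the Theia container later in this playbook, so
        # files edited in the IDE end up directly in /app/config/custom and
        # /app/migrations/custom of this container.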
pull: yes 421 | restart: yes 422 | recreate: yes 423 | memory: "300m" 424 | volumes: 425 | - "{{dirs.chwriter_custom_config}}:/app/config/custom" 426 | - "{{dirs.chwriter_custom_migrations}}:/app/migrations/custom" 427 | - "{{dirs.chwriter_emergency}}:/app/emergency" 428 | vars: 429 | service: chwriter 430 | tags: ['chwriter', 'platform', 'ppart', 'rockme', 'docker-container', 'full'] 431 | 432 | ##### ##### ##### ##### ##### Heavyload ##### ##### ##### ##### ##### 433 | 434 | 435 | - block: 436 | - name: Starting container for service Heavyload 437 | docker_container: 438 | name: heavyload 439 | hostname: heavyload 440 | image: "{{images.heavyload}}" 441 | labels: "{{docker_band_lbls}}" 442 | networks: [ { name: "{{docker_net_name}}" } ] 443 | etc_hosts: "{{etc_hosts}}" 444 | env: "{{container_env}}" 445 | restart_policy: "{{_restart_policy}}" 446 | pull: yes 447 | restart: yes 448 | memory: "500m" 449 | volumes: 450 | - "{{dirs.uploads}}:/go/src/heavyload/upload" 451 | ports: 452 | - "{{if_inner}}:{{ports.heavyload.0}}:{{ports.heavyload.1}}" 453 | vars: 454 | service: heavyload 455 | tags: ['heavyload', 'platform', 'pservice', 'docker-container', 'full'] 456 | 457 | ##### ##### ##### ##### ##### Anaconda ##### ##### ##### ##### ##### 458 | - block: 459 | - name: Starting container for service Jupyter 460 | docker_container: 461 | name: anaconda 462 | hostname: anaconda 463 | image: "{{images.anaconda}}" 464 | labels: "{{docker_band_lbls}}" 465 | networks: [ { name: "{{docker_net_name}}" } ] 466 | etc_hosts: "{{etc_hosts}}" 467 | restart_policy: "{{_restart_policy}}" 468 | pull: yes 469 | recreate: yes 470 | memory: "{{ anaconda_mem_limit|default('2g') }}" 471 | volumes: 472 | - "{{dirs.notebooks}}:/opt/notebooks:cached" 473 | - "{{dirs.public}}:/opt/notebooks/public:cached" 474 | 475 | ports: [ "{{if_inner}}:{{ports.jupyter.0}}:{{ports.jupyter.1}}" ] 476 | env: "{{ container_env }}" 477 | vars: 478 | service: anaconda 479 | cmd_parts: 480 | when: 'setup_jupyter == True' 481 | tags: ['never', 'anaconda', 'jupyter', 'platform', 'pservice', 'docker-container', 'full'] 482 | 483 | 484 | ##### ##### ##### ##### ##### Grafana ##### ##### ##### ##### ##### 485 | - block: 486 | - name: Creating Grafana datadir 487 | file: 488 | state: directory 489 | path: "{{dirs.grafana_data}}" 490 | owner: "{{_uid}}" # grafana container ids 491 | group: "{{_gid}}" 492 | 493 | - name: Starting container for service Grafana 494 | docker_container: 495 | name: grafana 496 | hostname: grafana 497 | image: "{{images.grafana}}" 498 | labels: "{{docker_band_lbls}}" 499 | networks: [ { name: "{{docker_net_name}}" } ] 500 | etc_hosts: "{{etc_hosts}}" 501 | restart_policy: "{{_restart_policy}}" 502 | pull: yes 503 | restart: yes 504 | recreate: yes 505 | memory: "300m" 506 | ports: 507 | - "{{if_inner}}:{{ports.grafana.0}}:{{ports.grafana.1}}" 508 | volumes: 509 | - '{{dirs.grafana_data}}:/var/lib/grafana' 510 | tags: ['never', 'grafana', 'platform', 'pservice', 'docker-container', 'full'] 511 | 512 | 513 | ##### ##### ##### ##### ##### Theia ##### ##### ##### ##### ##### 514 | - block: 515 | 516 | - name: Fix workspace permissions 517 | command: "chown -R {{_uid}}:{{_gid}} {{dirs.workspace}}" 518 | args: 519 | warn: False 520 | 521 | - name: Starting container for service Theia 522 | docker_container: 523 | name: theia 524 | hostname: theia 525 | image: "{{images.theia}}" 526 | labels: "{{docker_band_lbls}}" 527 | ports: [ "{{if_inner}}:{{ports.theia.0}}:{{ports.theia.1}}" ] 528 | networks: [ { name: 
"{{docker_net_name}}" } ] 529 | etc_hosts: "{{etc_hosts}}" 530 | env: "{{ container_env }}" 531 | restart_policy: "{{_restart_policy}}" 532 | pull: yes 533 | restart: yes 534 | recreate: yes 535 | memory: "1g" 536 | volumes: 537 | - "{{dirs.workspace}}:{{project_dir}}:cached" 538 | - "{{dirs.user_images}}:{{project_dir}}/images:cached" 539 | - "{{dirs.user_libraries}}:{{project_dir}}/libraries:cached" 540 | - "{{dirs.public}}:{{project_dir}}/public:cached" 541 | - "{{dirs.chwriter_custom_migrations}}:{{project_dir}}/config/migrations:cached" 542 | - "{{dirs.band_set}}:{{project_dir}}/sources_ro/band_set:ro" 543 | - "{{dirs.band_set}}/__skeletons:{{project_dir}}/sources_ro/skeletons:ro" 544 | - "{{dirs.chwriter_custom_config}}:{{custom_config_dir}}/chwriter:cached" 545 | - "{{dirs.front_custom_config}}:{{custom_config_dir}}/front:cached" 546 | - "{{dirs.environments}}:{{config_dir}}/environments:cached" 547 | vars: 548 | project_dir: /home/theia/project 549 | config_dir: "{{project_dir}}/config" 550 | custom_config_dir: "{{config_dir}}/custom" 551 | service: theia 552 | when: 'setup_theia == True' 553 | tags: ['theia', 'platform', 'pservice', 'band', 'docker-container', 'full'] 554 | 555 | 556 | ##### ##### ##### ##### ##### VPN Server ##### ##### ##### ##### ##### 557 | 558 | 559 | - import_tasks: tasks/setup_vpn_server.yml 560 | tags: ['never', 'ovpn-server'] 561 | 562 | 563 | ##### ##### ##### ##### ##### Custom tasks ##### ##### ##### ##### ##### 564 | 565 | - import_tasks: tasks/custom.yml 566 | 567 | ##### ##### ##### ##### ##### Fake Shop ##### ##### ##### ##### ##### 568 | 569 | 570 | - block: 571 | - name: Starting container for service Fake-Shop 572 | docker_container: 573 | name: "{{service}}" 574 | hostname: "{{service}}" 575 | image: "{{images[service]}}" 576 | labels: "{{docker_band_lbls}}" 577 | env: "{{container_env}}" 578 | restart_policy: "{{_restart_policy}}" 579 | pull: yes 580 | restart: yes 581 | memory: "50m" 582 | ports: 583 | - "{{if_inner}}:{{ports[service].0}}:{{ports[service].1}}" 584 | vars: 585 | service: fakeshop 586 | tags: ['never', 'fakeshop', 'platform', 'pservice', 'docker-container', 'full'] 587 | 588 | - docker_container: name=ebaloger state=absent 589 | tags: ['never', 'ebaloger', 'system', 'platform', 'pservice', 'docker-container', 'full'] 590 | 591 | - docker_container: name=ebaloger state=absent 592 | tags: ['never', 'httpdebug', 'system', 'full'] 593 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | msgpack 2 | ansible 3 | cryptography 4 | passlib 5 | netaddr 6 | bumpversion 7 | -------------------------------------------------------------------------------- /roles/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rockstat/bootstrap/4e974e3ad79dc337a940fac13536d9cd7a0bffa7/roles/.keep -------------------------------------------------------------------------------- /tasks/ch_migrate.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure database exists 4 | command: "clickhouse-client -h {{operate_host}} -d default" 5 | args: 6 | stdin: "CREATE DATABASE IF NOT EXISTS {{operate_db}} " 7 | 8 | - name: Ensure migrations table exists 9 | command: "clickhouse-client -h {{operate_host}} -d default" 10 | args: 11 | stdin: "CREATE TABLE IF NOT EXISTS {{operate_db}}.migrations (name String) 
ENGINE = Log" 12 | 13 | - name: Selecting applied migrations 14 | command: "clickhouse-client -h {{operate_host}} -d default --query 'SELECT name FROM {{operate_db}}.migrations'" 15 | register: ch_migrations_query 16 | 17 | - block: 18 | - name: Apply migrations folder 19 | include_tasks: 'ch_migration_run.yml' 20 | vars: 21 | op: 22 | name: "{{item|basename|splitext|first}}" 23 | file: "{{item}}" 24 | with_list: "{{lookup('fileglob', migrations_path+'/*.yml').split(',')|sort}}" 25 | vars: 26 | ch_migrations_list: "{{ch_migrations_query.stdout.split('\n')}}" -------------------------------------------------------------------------------- /tasks/ch_migration_run.yml: -------------------------------------------------------------------------------- 1 | - debug: 2 | msg: 3 | - "{{op}}" 4 | - "{{operate_db}}" 5 | - "{{ch_migrations_list}}" 6 | 7 | - name: Running migration 8 | command: 'clickhouse-client -h {{operate_host}} -d default' 9 | args: 10 | stdin: "{{query}}" 11 | with_items: "{{lookup('file', op.file) | from_yaml}}" 12 | loop_control: 13 | loop_var: query 14 | when: op.name not in ch_migrations_list 15 | 16 | - name: Saving migration info 17 | command: 'clickhouse-client -h {{operate_host}} -d default' 18 | args: 19 | stdin: "INSERT INTO {{operate_db}}.migrations (name) VALUES('{{op.name}}')" 20 | when: op.name not in ch_migrations_list 21 | -------------------------------------------------------------------------------- /tasks/check_dns.yml: -------------------------------------------------------------------------------- 1 | # - name: Detecting public IP 2 | # uri: url=https://api.ipify.org?format=json return_content=yes body_format=json 3 | # register: ipify_result 4 | 5 | - name: Detecting main ip 6 | command: "dig {{_domain}} +short" 7 | register: domain_check_result 8 | 9 | - debug: 10 | msg: "Detect result {{domain_check_result.stdout}}" 11 | 12 | - name: Checking DNS 13 | command: "dig {{item}} +short" 14 | register: subdomain_check_result 15 | changed_when: False 16 | failed_when: "domain_check_result.stdout not in subdomain_check_result.stdout" 17 | with_items: "{{check_domains}}" 18 | -------------------------------------------------------------------------------- /tasks/setup_http_debug.yml: -------------------------------------------------------------------------------- 1 | 2 | ##### ##### ##### ##### ##### HTTP debug ##### ##### ##### ##### ##### 3 | 4 | - block: 5 | - debug: msg="Binding http debug to {{_bind}}. 
Container state {{_state}}" 6 | 7 | - docker_container: 8 | name: http_debug 9 | state: absent 10 | 11 | - name: Starting container for service HTTP-Debug 12 | docker_container: 13 | name: httpdebug 14 | state: "{{_state}}" 15 | hostname: httpdebug 16 | image: "{{images.httpdebug}}" 17 | labels: "{{docker_band_lbls}}" 18 | networks: [ { name: "{{docker_net_name}}" } ] 19 | etc_hosts: "{{etc_hosts}}" 20 | restart_policy: "{{_restart_policy}}" 21 | restart: yes 22 | pull: yes 23 | recreate: yes 24 | memory: "100m" 25 | ports: 26 | - "{{_bind}}" 27 | vars: 28 | _state: "{{ (bind|default('') == 'absent')|ternary('absent', 'started') }}" 29 | _bind_default: "{{if_inner}}:{{ports.httpdebug.0}}:{{ports.httpdebug.1}}" 30 | _bind_dirty: "{{bind|default(_bind_default)}}" 31 | _bind: "{{ _bind_dirty + ':' + ports.httpdebug.1|string if ':' in _bind_dirty else _bind_default }}" 32 | tags: ['never', 'httpdebug'] 33 | 34 | 35 | -------------------------------------------------------------------------------- /tasks/setup_logspout.yml: -------------------------------------------------------------------------------- 1 | 2 | ##### ##### ##### ##### ##### Logspout ##### ##### ##### ##### ##### 3 | 4 | - name: Configuring Logspout service 5 | block: 6 | 7 | - docker_container: name=logspout state=absent 8 | 9 | - name: Starting container for Logspout service 10 | docker_container: 11 | name: logspout 12 | hostname: logspout 13 | image: "{{images.logspout}}" 14 | labels: "{{docker_band_lbls}}" 15 | networks: [ { name: "{{docker_net_name}}" } ] 16 | etc_hosts: "{{etc_hosts}}" 17 | restart_policy: "{{_restart_policy}}" 18 | restart: yes 19 | pull: yes 20 | recreate: yes 21 | memory: "100m" 22 | env: "{{ container_env }}" 23 | volumes: [ "/var/run/docker.sock:/var/run/docker.sock" ] 24 | ports: 25 | - "{{if_inner}}:{{ports.logspout.http.0}}:{{ports.logspout.http.1}}" 26 | - "{{if_inner}}:{{ports.logspout.udp.0}}:{{ports.logspout.udp.1}}" 27 | vars: 28 | service: logspout 29 | endpoints: 30 | loggly: "syslog+tcp://{{_loggly_hostname}}" 31 | papertrail: "syslog+tls://{{_papertrail_hostname}}" 32 | env: 33 | common: 34 | SYSLOG_HOSTNAME: "{{hostname}}" 35 | TAIL: 100 36 | loggly: 37 | SYSLOG_STRUCTURED_DATA: "{{_loggly_api_key}}@41058 tag=\"Logspout\"" 38 | tags: ['never', 'logspout', 'logger', 'monitoring', 'platform', 'ppart', 'docker-container', 'full'] 39 | 40 | -------------------------------------------------------------------------------- /tasks/setup_metrics_server.yml: -------------------------------------------------------------------------------- 1 | - import_role: 2 | name: dr.influxdb 3 | vars: 4 | influxdb_http_bind_address: ":{{ports.influxhttp.0}}" 5 | influxdb_tsb_enabled: "true" 6 | influxdb_tsb_bind_address: ":{{ports.influxtsb.0}}" 7 | influxdb_tsb_database: "opentsdb" 8 | influxdb_tsb_retention_policy: "" 9 | influxdb_tsb_consistency_level: "one" 10 | influxdb_tsb_tls_enabled: "false" 11 | influxdb_tsb_certificate: "/etc/ssl/influxdb.pem" 12 | influxdb_tsb_log_point_errors: "true" 13 | influxdb_tsb_batch_size: 1000 14 | influxdb_tsb_batch_pending: 5 15 | influxdb_tsb_batch_timeout: "1s" 16 | influxdb_subscriber_enabled: "false" 17 | tags: ['influxdb', 'tick'] 18 | 19 | - import_role: 20 | name: dr.chronograf 21 | vars: 22 | chronograf_port: "{{ports.chronograf.0}}" 23 | chronograf_influxdb_url: "http://{{if_inner}}:{{ports.influxhttp.0}}" 24 | tags: ['chronograf', 'tick'] 25 | 26 | - import_role: 27 | name: dr.openvpn-server 28 | vars: 29 | openvpn_clients_net: "{{s2s_vpn_net}}" 30 | 
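    # Server-to-server VPN wiring (for orientation): s2s_vpn_net is derived in
    # group_vars/all.yml from s2s_vpn_net_pattern (192.168.222.*) via the custom
    # ip_p2n filter, the port below is ports.s2svpn.0 (8079), and the client prefix
    # and count further down line up with the s2s_vpn_key path
    # 'ovpn_keys/node{{vpn_id}}.ovpn' used by the (commented-out) client import.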
openvpn_port: "{{ports.s2svpn.0}}" 31 | openvpn_client_prefix: node 32 | openvpn_clients: 20 33 | tags: ['s2s-ovpn-server', 's2s-ovpn'] 34 | 35 | 36 | -------------------------------------------------------------------------------- /tasks/setup_vpn_server.yml: -------------------------------------------------------------------------------- 1 | 2 | - import_role: 3 | name: dr.openvpn-server 4 | vars: 5 | openvpn_port: "{{ports.openvpn.0}}" 6 | openvpn_clients: 5 7 | openvpn_comp_lz4: yes 8 | openvpn_clients_net: "{{s2s_vpn_net}}" 9 | openvpn_push: 10 | - "route {{docker_net_net}} {{docker_netmask}}" 11 | tags: ['openvpn'] 12 | 13 | # - import_role: 14 | # name: dr.openvpn-client 15 | # vars: 16 | # openvpnc_key: '{{s2s_vpn_key}}' 17 | # when: s2s_vpn_connect is defined and s2s_vpn_connect == True and vpn_id is defined 18 | # tags: ['s2s-ovpn-client', 's2s-ovpn'] 19 | 20 | -------------------------------------------------------------------------------- /templates/chproxy/config.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | server: 3 | http: 4 | listen_addr: ":9090" 5 | allowed_networks: 6 | {% for net in chproxy_allowed_networks|default([]) %} 7 | - "{{ net }}" 8 | {% endfor %} 9 | users: 10 | {% for user in chproxy_users|default([]) %} 11 | - name: "{{ user.name }}" 12 | password: "{{ user.password|default('') }}" 13 | allow_cors: {{ user.allow_cors|default(chproxy_defs.allow_cors) }} 14 | to_cluster: "{{ user.to_cluster|default(chproxy_defs.to_cluster) }}" 15 | to_user: "{{ user.to_user }}" 16 | max_queue_time: "{{ user.max_queue_time|default(chproxy_defs.max_queue_time) }}" 17 | max_execution_time: "{{ user.max_execution_time|default(chproxy_defs.max_execution_time) }}" 18 | max_concurrent_queries: {{ user.max_concurrent_queries|default(chproxy_defs.max_concurrent_queries) }} 19 | max_queue_size: {{ user.max_queue_size|default(chproxy_defs.max_queue_size) }} 20 | {% endfor %} 21 | # by default each cluster has `default` user which can be overridden by section `users` 22 | clusters: 23 | {% for cluster in chproxy_clusters|default([]) %} 24 | - name: "{{ cluster.name }}" 25 | nodes: 26 | {% for node in cluster.nodes|default([]) %} 27 | - "{{ node }}" 28 | {% endfor %} 29 | users: 30 | {% for user in cluster.users|default([]) %} 31 | - {{ user }} 32 | {% endfor %} 33 | {% endfor %} 34 | -------------------------------------------------------------------------------- /templates/facts.d/config.fact.j2: -------------------------------------------------------------------------------- 1 | [general] 2 | {% for k in _facts_dict.keys() %} 3 | {{k}}={{_facts_dict[k]}} 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /templates/google-webmaster.j2: -------------------------------------------------------------------------------- 1 | google-site-verification: {{_google_webmaster}} 2 | -------------------------------------------------------------------------------- /templates/nginx/upstream-site.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | server { 3 | listen {{item.value.listen|default(443)}} ssl http2; 4 | server_name {{item.value.domain}}; 5 | #cache 6 | add_header 'Cache-Control' 'no-store; must-revalidate'; 7 | expires off; 8 | # common 9 | #ssl 10 | ssl_certificate_key /etc/letsencrypt/live/{{item.value.cert}}/privkey.pem; 11 | ssl_certificate /etc/letsencrypt/live/{{item.value.cert}}/fullchain.pem; 12 | ssl_trusted_certificate 
/etc/letsencrypt/live/{{item.value.cert}}/chain.pem; 13 | 14 | {% if 'uploader' in item.value and item.value.uploader == True %} 15 | location /upload { 16 | client_max_body_size 8m; 17 | client_body_buffer_size 128K; 18 | limit_except POST { deny all; } 19 | proxy_pass http://heavyload; 20 | } 21 | {% endif %} 22 | {% if item.value.root is defined and item.value.root != None %} 23 | index index.html; 24 | root {{item.value.root}}; 25 | location / { 26 | try_files $uri $uri/index.html @upstream; 27 | } 28 | location @upstream { 29 | proxy_pass http://{{item.key}}; 30 | {% if item.value.upgrade is defined %} 31 | proxy_http_version 1.1; 32 | proxy_set_header Upgrade $http_upgrade; 33 | proxy_set_header Connection $connection_upgrade; 34 | {% endif %} 35 | } 36 | {% endif %} 37 | } -------------------------------------------------------------------------------- /templates/theia/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rockstat/theia-image 2 | 3 | -------------------------------------------------------------------------------- /vars/nginx.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ########## Nginx ########## 3 | _nginx_gzip_params: 4 | - gzip on 5 | - gzip_min_length 10240 6 | - gzip_proxied expired no-cache no-store private auth 7 | - gzip_types text/plain text/css text/javascript application/javascript application/x-javascript 8 | _nginx_ssl_params: 9 | - ssl_session_cache shared:SSL:10m 10 | - ssl_session_timeout 10m 11 | - ssl_prefer_server_ciphers on 12 | - ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:DH+3DES:!ADH:!AECDH:!MD5 13 | - ssl_protocols TLSv1 TLSv1.1 TLSv1.2 14 | - ssl_stapling on 15 | - ssl_stapling_verify on 16 | _nginx_http_params: 17 | - tcp_nopush on 18 | - tcp_nodelay on 19 | - keepalive_timeout 60 20 | - keepalive_requests 1000 21 | - error_log /var/log/nginx/error.log 22 | - access_log /var/log/nginx/access.log 23 | # - access_log off 24 | _nginx_proxy_params: 25 | - proxy_http_version 1.1 26 | - proxy_redirect off 27 | - proxy_buffering on 28 | - proxy_connect_timeout 15s 29 | - proxy_send_timeout 15s 30 | - proxy_buffers 8 4k 31 | - proxy_store off 32 | - proxy_ssl_verify off 33 | - proxy_set_header Host $host 34 | - proxy_set_header X-Scheme $scheme 35 | - proxy_set_header X-Real-IP $remote_addr 36 | - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for 37 | - proxy_set_header Upgrade $http_upgrade 38 | # - proxy_cache_bypass $http_upgrade; 39 | - proxy_set_header Connection $connection_upgrade 40 | _nginx_proto_upgrade: 41 | - map $http_upgrade $connection_upgrade { 42 | default upgrade; 43 | '' close; 44 | } 45 | --------------------------------------------------------------------------------
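For orientation, the inventory templates, install_roles.yml and the makefile above suggest the intended flow: create a private inventory, install the external roles, then run the tag-gated playbooks. Below is a minimal sketch only; the host name, domain and email are placeholders, and an explicit `hosts:` level is added under the group because Ansible's YAML inventory format expects it.

# inventory/private.yml (hypothetical example, following inventory/template-private.yml)
all:
  hosts:
    mystat:
      ansible_host: stats.example.com
      tracker_domain: stats.example.com
  vars:
    contact_email: admin@example.com
  children:
    rockstat:
      hosts:
        mystat: {}

# Then, roughly following install_roles.yml and the makefile targets:
#   ansible-galaxy install -r install_roles.yml -p roles/
#   ansible-playbook os_init.yml --limit=mystat -e wait_server=1
#   ansible-playbook platform.yml --limit=mystat --tags=full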