├── LICENSE ├── README.md ├── conf ├── mysqld.cnf ├── nginx-conf.d │ └── opsant.conf ├── nginx.conf ├── openssl.cnf ├── prod.py └── redis.conf ├── docker ├── Dockerfile ├── README.md ├── openresty.ini ├── opsant.ini ├── supervisord.conf └── websocket.ini ├── generate-ssl.sh ├── install.config.example └── install.sh /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpsAnt介绍 2 | 3 | OpsAnt是全开源的云原生运维平台,致力于为全国数百万小微企业提供开源的多云管理和运维管理平台。目前提供免费下载试用,2021年年底完全开放源代码。 4 | 5 | - 前端开发:Vue.js + Ant Design of Vue 6 | - 后端开发:Python + Django 7 | - 数据库:MySQL、Redis 8 | 9 | > 本项目是OpsAnt Docker容器化部署项目,OpsAnt源码请查看opsant-backend和opsant-frontend项目。 10 | 11 | - 版本概览:v1.0.0 12 | 13 |

14 | 15 | 16 | 17 |

18 | 19 | - 微信交流群: 添加微信后,回复OpsAnt,即可加群。 20 | 21 | > 纯技术交流,非技术话题一键踢出,零容忍。 22 | 23 | 24 | 25 | ## 使用Docker部署OpsAnt 26 | 27 | 0. 环境准备 28 | 29 | > 部署OpsAnt需要一台2CPU、4G内存的云主机,硬盘默认即可。对外开放安全组:80和443端口。 30 | 31 | 1. 安装Docker和初始化使用的软件包 32 | 33 | - 【CentOS 7】安装Docker和MySQL客户端 34 | 35 | ``` 36 | curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo 37 | curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 38 | yum install -y git wget docker-ce mariadb 39 | systemctl enable --now docker 40 | ``` 41 | 42 | - 【CentOS 8】安装Docker和MySQL客户端 43 | 44 | ``` 45 | dnf config-manager --add-repo=http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 46 | dnf -y install docker-ce --nobest 47 | dnf -y install mariadb git 48 | systemctl enable --now docker 49 | ``` 50 | 51 | - 【Ubuntu】 安装Docker和MySQL客户端 52 | 53 | ``` 54 | # step 1: 安装必要的一些系统工具 55 | sudo apt-get update 56 | sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common 57 | # step 2: 安装GPG证书 58 | curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add - 59 | # Step 3: 写入软件源信息 60 | sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable" 61 | # Step 4: 更新并安装Docker-CE 62 | sudo apt-get -y update 63 | sudo apt-get -y install docker-ce wget mysql-client git 64 | systemctl enable --now docker 65 | ``` 66 | 67 | 2. 克隆项目代码 68 | 69 | ``` 70 | [root@linux-node1 ~]# git clone https://github.com/unixhot/opsant.git 71 | ``` 72 | 73 | 3. 修改配置文件并执行安装 74 | 75 | ``` 76 | [root@linux-node1 ~]# cd opsant/ 77 | [root@linux-node1 opsant]# cp install.config.example install.config 78 | [root@linux-node1 opsant]# vim install.config 79 | LOCAL_IP="192.168.56.11" 80 | DOMAIN_NAME="192.168.56.11" 81 | [root@linux-node1 opsant]# ./install.sh 82 | ``` 83 | > DOMAIN_NAME是指访问的地址,为了安全起见,访问地址不支持域名和IP混用。 84 | 85 | 4. 
访问OpsAnt,支持Chrome、Firefox等,不支持IE浏览器 86 | 87 | https://192.168.56.11 88 | 89 | 6. 使用安装脚本输出的用户名和密码进行登录。 90 | 91 | 安装成功后会自动生成admin的密码,请使用输出的密码进行登录。并及时修改密码。 92 | -------------------------------------------------------------------------------- /conf/mysqld.cnf: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. 2 | # 3 | # This program is free software; you can redistribute it and/or modify 4 | # it under the terms of the GNU General Public License, version 2.0, 5 | # as published by the Free Software Foundation. 6 | # 7 | # This program is also distributed with certain software (including 8 | # but not limited to OpenSSL) that is licensed under separate terms, 9 | # as designated in a particular file or component or in included license 10 | # documentation. The authors of MySQL hereby grant you an additional 11 | # permission to link the program and your derivative works with the 12 | # separately licensed software that they have included with MySQL. 13 | # 14 | # This program is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License, version 2.0, for more details. 18 | # 19 | # You should have received a copy of the GNU General Public License 20 | # along with this program; if not, write to the Free Software 21 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 | 23 | # 24 | # The MySQL Server configuration file. 
25 | # 26 | # For explanations see 27 | # http://dev.mysql.com/doc/mysql/en/server-system-variables.html 28 | 29 | [mysqld] 30 | pid-file = /var/run/mysqld/mysqld.pid 31 | socket = /var/run/mysqld/mysqld.sock 32 | datadir = /var/lib/mysql 33 | log-error = /var/log/mysql/error.log 34 | # Disabling symbolic-links is recommended to prevent assorted security risks 35 | symbolic-links=0 36 | default-storage-engine = innodb 37 | innodb_file_per_table = on 38 | collation-server = utf8_general_ci 39 | character-set-server = utf8 40 | max_connections = 4096 41 | -------------------------------------------------------------------------------- /conf/nginx-conf.d/opsant.conf: -------------------------------------------------------------------------------- 1 | # upstream - web 2 | upstream OPSANT { 3 | server 127.0.0.1:8001 max_fails=1 fail_timeout=30s; 4 | } 5 | 6 | upstream WEBSOCKET { 7 | server 127.0.0.1:8002 max_fails=1 fail_timeout=30s; 8 | } 9 | 10 | server { 11 | listen 80; 12 | server_name DOMAIN_NAME; 13 | location ~ ^/uploads/(.*) { 14 | autoindex off; 15 | root /opt/opsant/; 16 | } 17 | location ~/ { 18 | rewrite ^(.*)$ https://$host$1 permanent; 19 | } 20 | } 21 | 22 | server { 23 | listen 443 ssl; 24 | server_name DOMAIN_NAME; 25 | access_log /opt/opsant/logs/nginx_access.log; 26 | error_log /opt/opsant/logs/nginx_error.log; 27 | ssl_certificate /usr/local/openresty/nginx/conf.d/ssl/DOMAIN_NAME.pem; 28 | ssl_certificate_key /usr/local/openresty/nginx/conf.d/ssl/DOMAIN_NAME.key; 29 | ssl_session_timeout 5m; 30 | ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; 31 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 32 | ssl_prefer_server_ciphers on; 33 | # gzip config 34 | gzip on; 35 | gzip_min_length 1k; 36 | gzip_comp_level 9; 37 | gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript application/x-httpd-php image/jpeg image/gif image/png; 38 | gzip_vary on; 39 | gzip_disable 
"MSIE [1-6]\."; 40 | client_max_body_size 512m; 41 | 42 | # ============================ paas ============================ 43 | # Static 44 | location /static/ { 45 | autoindex off; 46 | root /opt/opsant-backend; 47 | } 48 | # CONTROL WebSocket 49 | location /ws/control/ { 50 | proxy_pass http://WEBSOCKET; 51 | proxy_http_version 1.1; 52 | proxy_set_header Upgrade $http_upgrade; 53 | proxy_set_header Connection "upgrade"; 54 | proxy_redirect off; 55 | proxy_set_header Host $host; 56 | proxy_set_header X-Real-IP $remote_addr; 57 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 58 | proxy_set_header X-Forwarded-Host $server_name; 59 | } 60 | # PAAS_SERVICE HOST/PORT 61 | location / { 62 | proxy_pass http://OPSANT; 63 | proxy_pass_header Server; 64 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 65 | proxy_set_header X-Real-IP $remote_addr; 66 | proxy_set_header X-Scheme $scheme; 67 | proxy_set_header Host $http_host; 68 | proxy_redirect off; 69 | proxy_read_timeout 600; 70 | } 71 | 72 | location ~ ^/uploads/(.*) { 73 | autoindex off; 74 | root /opt/opsant/; 75 | } 76 | 77 | # PAAS_SERVICE HOST/PORT, for doc 78 | location ~ ^/docs/(.*) { 79 | proxy_pass http://OPSANT/static/docs/$1$is_args$args; 80 | proxy_pass_header Server; 81 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 82 | proxy_set_header X-Real-IP $remote_addr; 83 | proxy_set_header X-Scheme $scheme; 84 | proxy_set_header Host $http_host; 85 | proxy_redirect off; 86 | proxy_read_timeout 600; 87 | 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /conf/nginx.conf: -------------------------------------------------------------------------------- 1 | user root; 2 | daemon off; 3 | worker_processes auto; 4 | error_log /opt/opsant/logs/openresty_error.log; 5 | pid /run/nginx.pid; 6 | 7 | events { 8 | worker_connections 1024; 9 | } 10 | 11 | http { 12 | log_format main '$remote_addr - $remote_user [$time_local] "$request" 
' 13 | '$status $body_bytes_sent "$http_referer" ' 14 | '"$http_user_agent" "$http_x_forwarded_for"'; 15 | 16 | access_log /opt/opsant/logs/openresty_access.log main; 17 | 18 | sendfile on; 19 | tcp_nopush on; 20 | tcp_nodelay on; 21 | keepalive_timeout 65; 22 | types_hash_max_size 2048; 23 | client_max_body_size 2G; 24 | server_names_hash_bucket_size 256; 25 | underscores_in_headers on; 26 | 27 | include /usr/local/openresty/nginx/conf/mime.types; 28 | default_type application/octet-stream; 29 | include /usr/local/openresty/nginx/conf.d/*.conf; 30 | 31 | server { 32 | listen 80 default_server; 33 | listen [::]:80 default_server; 34 | server_name _; 35 | root /usr/share/nginx/html; 36 | 37 | location / { 38 | } 39 | 40 | error_page 404 /404.html; 41 | location = /40x.html { 42 | } 43 | 44 | error_page 500 502 503 504 /50x.html; 45 | location = /50x.html { 46 | } 47 | } 48 | } 49 | 50 | -------------------------------------------------------------------------------- /conf/openssl.cnf: -------------------------------------------------------------------------------- 1 | #################################################################### 2 | [ req ] 3 | default_bits = 2048 4 | default_keyfile = privkey.pem 5 | distinguished_name = req_distinguished_name 6 | req_extensions = req_ext 7 | x509_extensions = v3_req 8 | prompt = no 9 | 10 | [ req_distinguished_name ] 11 | countryName = CN 12 | countryName_default = CN 13 | countryName_min = 2 14 | countryName_max = 2 15 | 16 | stateOrProvinceName = 北京 17 | stateOrProvinceName_default = 北京 18 | 19 | localityName = 北京 20 | 21 | 0.organizationName = OpsAny 22 | 0.organizationName_default = OpsAny 23 | 24 | organizationalUnitName = OpsAny 25 | #organizationalUnitName_default = OpsAny 26 | 27 | commonName = Common Name (e.g. 
server FQDN or YOUR name) 28 | commonName_max = 64 29 | 30 | emailAddress = OpsAny@womaiyun.com 31 | emailAddress_max = 64 32 | 33 | [req_ext] 34 | subjectAltName = @alt_names 35 | 36 | [ v3_req ] 37 | 38 | basicConstraints = critical, CA:FALSE 39 | keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment, keyAgreement 40 | extendedKeyUsage = critical, serverAuth 41 | #subjectAltName = @alt_names 42 | 43 | #[alt_names] 44 | #IP.1 = 127.0.0.1 45 | -------------------------------------------------------------------------------- /conf/prod.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 生产环境配置文件 3 | from config.default import * 4 | 5 | RUN_MODE = 'PRODUCT' 6 | BROKER_URL = 'redis://:REDIS_SERVER_PASSWORD@REDIS_SERVER_IP:6379/8' 7 | OPS_ANT_COOKIE_NAME = 'opsant_token' 8 | OPS_ANT_COOKIE_AGE = 60 * 60 * 24 9 | DEBUG = False 10 | 11 | # 本地开发数据库设置 12 | DATABASES = { 13 | 'default': { 14 | 'ENGINE': 'django.db.backends.mysql', 15 | 'NAME': 'opsant', 16 | 'USER': 'opsant', 17 | 'PASSWORD': 'MYSQL_OPSANT_PASSWORD', 18 | 'HOST': 'MYSQL_SERVER_IP', 19 | 'PORT': '3306', 20 | }, 21 | } 22 | 23 | # Celery使用 24 | CHANNEL_LAYERS = { 25 | 'default': { 26 | 'BACKEND': 'channels_redis.core.RedisChannelLayer', 27 | 'CONFIG': { 28 | 'hosts': ['redis://:REDIS_SERVER_PASSWORD@REDIS_SERVER_IP:6379/7'], 29 | "symmetric_encryption_keys": [SECRET_KEY], 30 | }, 31 | } 32 | } 33 | 34 | # 静态文件访问路径 35 | STATIC_URL = '/static/' 36 | STATIC_ROOT = os.path.join(BASE_DIR, "static") 37 | 38 | # Windows堡垒机访问配置 39 | GUACD_HOST = 'GUACD_SERVER_IP' 40 | GUACD_PORT = '4822' 41 | ORI_GUACD_PATH = '/opt/opsant/uploads/guacamole/' 42 | GUACD_PATH = '/srv/guacamole' 43 | 44 | # Linux堡垒机访问配置 45 | TERMINAL_TIMEOUT = 1800 46 | TERMINAL_PATH = '/opt/opsant/uploads/terminal' 47 | 48 | # 初始化用户配置 49 | ADMIN_PASSWORD = 'admin' 50 | ADMIN_EMAIL = 'admin@example.com' 51 | ADMIN_PHONE = '13666666666' 52 | 
-------------------------------------------------------------------------------- /conf/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## NETWORK ##################################### 39 | 40 | # By default, if no "bind" configuration directive is specified, Redis listens 41 | # for connections from all the network interfaces available on the server. 
42 | # It is possible to listen to just one or multiple selected interfaces using 43 | # the "bind" configuration directive, followed by one or more IP addresses. 44 | # 45 | # Examples: 46 | # 47 | # bind 192.168.1.100 10.0.0.1 48 | # bind 127.0.0.1 ::1 49 | # 50 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 51 | # internet, binding to all the interfaces is dangerous and will expose the 52 | # instance to everybody on the internet. So by default we uncomment the 53 | # following bind directive, that will force Redis to listen only into 54 | # the IPv4 loopback interface address (this means Redis will be able to 55 | # accept connections only from clients running into the same computer it 56 | # is running). 57 | # 58 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 59 | # JUST COMMENT THE FOLLOWING LINE. 60 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | bind 0.0.0.0 62 | 63 | # Protected mode is a layer of security protection, in order to avoid that 64 | # Redis instances left open on the internet are accessed and exploited. 65 | # 66 | # When protected mode is on and if: 67 | # 68 | # 1) The server is not binding explicitly to a set of addresses using the 69 | # "bind" directive. 70 | # 2) No password is configured. 71 | # 72 | # The server only accepts connections from clients connecting from the 73 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 74 | # sockets. 75 | # 76 | # By default protected mode is enabled. You should disable it only if 77 | # you are sure you want clients from other hosts to connect to Redis 78 | # even if no authentication is configured, nor a specific set of interfaces 79 | # are explicitly listed using the "bind" directive. 80 | protected-mode yes 81 | 82 | # Accept connections on the specified port, default is 6379 (IANA #815344). 83 | # If port 0 is specified Redis will not listen on a TCP socket. 
84 | port 6379 85 | 86 | # TCP listen() backlog. 87 | # 88 | # In high requests-per-second environments you need an high backlog in order 89 | # to avoid slow clients connections issues. Note that the Linux kernel 90 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 91 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 92 | # in order to get the desired effect. 93 | tcp-backlog 511 94 | 95 | # Unix socket. 96 | # 97 | # Specify the path for the Unix socket that will be used to listen for 98 | # incoming connections. There is no default, so Redis will not listen 99 | # on a unix socket when not specified. 100 | # 101 | # unixsocket /tmp/redis.sock 102 | # unixsocketperm 700 103 | 104 | # Close the connection after a client is idle for N seconds (0 to disable) 105 | timeout 0 106 | 107 | # TCP keepalive. 108 | # 109 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 110 | # of communication. This is useful for two reasons: 111 | # 112 | # 1) Detect dead peers. 113 | # 2) Take the connection alive from the point of view of network 114 | # equipment in the middle. 115 | # 116 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 117 | # Note that to close the connection the double of the time is needed. 118 | # On other kernels the period depends on the kernel configuration. 119 | # 120 | # A reasonable value for this option is 300 seconds, which is the new 121 | # Redis default starting with Redis 3.2.1. 122 | tcp-keepalive 300 123 | 124 | ################################# GENERAL ##################################### 125 | 126 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 127 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 128 | daemonize no 129 | 130 | # If you run Redis from upstart or systemd, Redis can interact with your 131 | # supervision tree. 
Options: 132 | # supervised no - no supervision interaction 133 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 134 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 135 | # supervised auto - detect upstart or systemd method based on 136 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 137 | # Note: these supervision methods only signal "process is ready." 138 | # They do not enable continuous liveness pings back to your supervisor. 139 | supervised no 140 | 141 | # If a pid file is specified, Redis writes it where specified at startup 142 | # and removes it at exit. 143 | # 144 | # When the server runs non daemonized, no pid file is created if none is 145 | # specified in the configuration. When the server is daemonized, the pid file 146 | # is used even if not specified, defaulting to "/var/run/redis.pid". 147 | # 148 | # Creating a pid file is best effort: if Redis is not able to create it 149 | # nothing bad happens, the server will start and run normally. 150 | pidfile /data/redis_6379.pid 151 | 152 | # Specify the server verbosity level. 153 | # This can be one of: 154 | # debug (a lot of information, useful for development/testing) 155 | # verbose (many rarely useful info, but not a mess like the debug level) 156 | # notice (moderately verbose, what you want in production probably) 157 | # warning (only very important / critical messages are logged) 158 | loglevel notice 159 | 160 | # Specify the log file name. Also the empty string can be used to force 161 | # Redis to log on the standard output. Note that if you use standard 162 | # output for logging but daemonize, logs will be sent to /dev/null 163 | logfile /data/redis.log 164 | 165 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 166 | # and optionally update the other syslog parameters to suit your needs. 167 | # syslog-enabled no 168 | 169 | # Specify the syslog identity. 
170 | # syslog-ident redis 171 | 172 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 173 | # syslog-facility local0 174 | 175 | # Set the number of databases. The default database is DB 0, you can select 176 | # a different one on a per-connection basis using SELECT <dbid> where 177 | # dbid is a number between 0 and 'databases'-1 178 | databases 16 179 | 180 | ################################ SNAPSHOTTING ################################ 181 | # 182 | # Save the DB on disk: 183 | # 184 | # save <seconds> <changes> 185 | # 186 | # Will save the DB if both the given number of seconds and the given 187 | # number of write operations against the DB occurred. 188 | # 189 | # In the example below the behaviour will be to save: 190 | # after 900 sec (15 min) if at least 1 key changed 191 | # after 300 sec (5 min) if at least 10 keys changed 192 | # after 60 sec if at least 10000 keys changed 193 | # 194 | # Note: you can disable saving completely by commenting out all "save" lines. 195 | # 196 | # It is also possible to remove all the previously configured save 197 | # points by adding a save directive with a single empty string argument 198 | # like in the following example: 199 | # 200 | # save "" 201 | 202 | save 900 1 203 | save 300 10 204 | save 60 10000 205 | 206 | # By default Redis will stop accepting writes if RDB snapshots are enabled 207 | # (at least one save point) and the latest background save failed. 208 | # This will make the user aware (in a hard way) that data is not persisting 209 | # on disk properly, otherwise chances are that no one will notice and some 210 | # disaster will happen. 211 | # 212 | # If the background saving process will start working again Redis will 213 | # automatically allow writes again. 
214 | # 215 | # However if you have setup your proper monitoring of the Redis server 216 | # and persistence, you may want to disable this feature so that Redis will 217 | # continue to work as usual even if there are problems with disk, 218 | # permissions, and so forth. 219 | stop-writes-on-bgsave-error yes 220 | 221 | # Compress string objects using LZF when dump .rdb databases? 222 | # For default that's set to 'yes' as it's almost always a win. 223 | # If you want to save some CPU in the saving child set it to 'no' but 224 | # the dataset will likely be bigger if you have compressible values or keys. 225 | rdbcompression yes 226 | 227 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 228 | # This makes the format more resistant to corruption but there is a performance 229 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 230 | # for maximum performances. 231 | # 232 | # RDB files created with checksum disabled have a checksum of zero that will 233 | # tell the loading code to skip the check. 234 | rdbchecksum yes 235 | 236 | # The filename where to dump the DB 237 | dbfilename dump.rdb 238 | 239 | # The working directory. 240 | # 241 | # The DB will be written inside this directory, with the filename specified 242 | # above using the 'dbfilename' configuration directive. 243 | # 244 | # The Append Only File will also be created inside this directory. 245 | # 246 | # Note that you must specify a directory here, not a file name. 247 | dir /data 248 | 249 | ################################# REPLICATION ################################# 250 | 251 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 252 | # another Redis server. A few things to understand ASAP about Redis replication. 
253 | # 254 | # 1) Redis replication is asynchronous, but you can configure a master to 255 | # stop accepting writes if it appears to be not connected with at least 256 | # a given number of slaves. 257 | # 2) Redis slaves are able to perform a partial resynchronization with the 258 | # master if the replication link is lost for a relatively small amount of 259 | # time. You may want to configure the replication backlog size (see the next 260 | # sections of this file) with a sensible value depending on your needs. 261 | # 3) Replication is automatic and does not need user intervention. After a 262 | # network partition slaves automatically try to reconnect to masters 263 | # and resynchronize with them. 264 | # 265 | # slaveof <masterip> <masterport> 266 | 267 | # If the master is password protected (using the "requirepass" configuration 268 | # directive below) it is possible to tell the slave to authenticate before 269 | # starting the replication synchronization process, otherwise the master will 270 | # refuse the slave request. 271 | # 272 | requirepass REDIS_SERVER_PASSWORD 273 | # masterauth <master-password> 274 | 275 | # When a slave loses its connection with the master, or when the replication 276 | # is still in progress, the slave can act in two different ways: 277 | # 278 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 279 | # still reply to client requests, possibly with out of date data, or the 280 | # data set may just be empty if this is the first synchronization. 281 | # 282 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 283 | # an error "SYNC with master in progress" to all the kind of commands 284 | # but to INFO and SLAVEOF. 285 | # 286 | slave-serve-stale-data yes 287 | 288 | # You can configure a slave instance to accept writes or not. 
Writing against 289 | # a slave instance may be useful to store some ephemeral data (because data 290 | # written on a slave will be easily deleted after resync with the master) but 291 | # may also cause problems if clients are writing to it because of a 292 | # misconfiguration. 293 | # 294 | # Since Redis 2.6 by default slaves are read-only. 295 | # 296 | # Note: read only slaves are not designed to be exposed to untrusted clients 297 | # on the internet. It's just a protection layer against misuse of the instance. 298 | # Still a read only slave exports by default all the administrative commands 299 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 300 | # security of read only slaves using 'rename-command' to shadow all the 301 | # administrative / dangerous commands. 302 | slave-read-only yes 303 | 304 | # Replication SYNC strategy: disk or socket. 305 | # 306 | # ------------------------------------------------------- 307 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 308 | # ------------------------------------------------------- 309 | # 310 | # New slaves and reconnecting slaves that are not able to continue the replication 311 | # process just receiving differences, need to do what is called a "full 312 | # synchronization". An RDB file is transmitted from the master to the slaves. 313 | # The transmission can happen in two different ways: 314 | # 315 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 316 | # file on disk. Later the file is transferred by the parent 317 | # process to the slaves incrementally. 318 | # 2) Diskless: The Redis master creates a new process that directly writes the 319 | # RDB file to slave sockets, without touching the disk at all. 320 | # 321 | # With disk-backed replication, while the RDB file is generated, more slaves 322 | # can be queued and served with the RDB file as soon as the current child producing 323 | # the RDB file finishes its work. 
With diskless replication instead once 324 | # the transfer starts, new slaves arriving will be queued and a new transfer 325 | # will start when the current one terminates. 326 | # 327 | # When diskless replication is used, the master waits a configurable amount of 328 | # time (in seconds) before starting the transfer in the hope that multiple slaves 329 | # will arrive and the transfer can be parallelized. 330 | # 331 | # With slow disks and fast (large bandwidth) networks, diskless replication 332 | # works better. 333 | repl-diskless-sync no 334 | 335 | # When diskless replication is enabled, it is possible to configure the delay 336 | # the server waits in order to spawn the child that transfers the RDB via socket 337 | # to the slaves. 338 | # 339 | # This is important since once the transfer starts, it is not possible to serve 340 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 341 | # waits a delay in order to let more slaves arrive. 342 | # 343 | # The delay is specified in seconds, and by default is 5 seconds. To disable 344 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 345 | repl-diskless-sync-delay 5 346 | 347 | # Slaves send PINGs to server in a predefined interval. It's possible to change 348 | # this interval with the repl_ping_slave_period option. The default value is 10 349 | # seconds. 350 | # 351 | # repl-ping-slave-period 10 352 | 353 | # The following option sets the replication timeout for: 354 | # 355 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 356 | # 2) Master timeout from the point of view of slaves (data, pings). 357 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 358 | # 359 | # It is important to make sure that this value is greater than the value 360 | # specified for repl-ping-slave-period otherwise a timeout will be detected 361 | # every time there is low traffic between the master and the slave. 
362 | # 363 | # repl-timeout 60 364 | 365 | # Disable TCP_NODELAY on the slave socket after SYNC? 366 | # 367 | # If you select "yes" Redis will use a smaller number of TCP packets and 368 | # less bandwidth to send data to slaves. But this can add a delay for 369 | # the data to appear on the slave side, up to 40 milliseconds with 370 | # Linux kernels using a default configuration. 371 | # 372 | # If you select "no" the delay for data to appear on the slave side will 373 | # be reduced but more bandwidth will be used for replication. 374 | # 375 | # By default we optimize for low latency, but in very high traffic conditions 376 | # or when the master and slaves are many hops away, turning this to "yes" may 377 | # be a good idea. 378 | repl-disable-tcp-nodelay no 379 | 380 | # Set the replication backlog size. The backlog is a buffer that accumulates 381 | # slave data when slaves are disconnected for some time, so that when a slave 382 | # wants to reconnect again, often a full resync is not needed, but a partial 383 | # resync is enough, just passing the portion of data the slave missed while 384 | # disconnected. 385 | # 386 | # The bigger the replication backlog, the longer the time the slave can be 387 | # disconnected and later be able to perform a partial resynchronization. 388 | # 389 | # The backlog is only allocated once there is at least a slave connected. 390 | # 391 | # repl-backlog-size 1mb 392 | 393 | # After a master has no longer connected slaves for some time, the backlog 394 | # will be freed. The following option configures the amount of seconds that 395 | # need to elapse, starting from the time the last slave disconnected, for 396 | # the backlog buffer to be freed. 397 | # 398 | # A value of 0 means to never release the backlog. 399 | # 400 | # repl-backlog-ttl 3600 401 | 402 | # The slave priority is an integer number published by Redis in the INFO output. 
403 | # It is used by Redis Sentinel in order to select a slave to promote into a 404 | # master if the master is no longer working correctly. 405 | # 406 | # A slave with a low priority number is considered better for promotion, so 407 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 408 | # pick the one with priority 10, that is the lowest. 409 | # 410 | # However a special priority of 0 marks the slave as not able to perform the 411 | # role of master, so a slave with priority of 0 will never be selected by 412 | # Redis Sentinel for promotion. 413 | # 414 | # By default the priority is 100. 415 | slave-priority 100 416 | 417 | # It is possible for a master to stop accepting writes if there are less than 418 | # N slaves connected, having a lag less or equal than M seconds. 419 | # 420 | # The N slaves need to be in "online" state. 421 | # 422 | # The lag in seconds, that must be <= the specified value, is calculated from 423 | # the last ping received from the slave, that is usually sent every second. 424 | # 425 | # This option does not GUARANTEE that N replicas will accept the write, but 426 | # will limit the window of exposure for lost writes in case not enough slaves 427 | # are available, to the specified number of seconds. 428 | # 429 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 430 | # 431 | # min-slaves-to-write 3 432 | # min-slaves-max-lag 10 433 | # 434 | # Setting one or the other to 0 disables the feature. 435 | # 436 | # By default min-slaves-to-write is set to 0 (feature disabled) and 437 | # min-slaves-max-lag is set to 10. 438 | 439 | # A Redis master is able to list the address and port of the attached 440 | # slaves in different ways. For example the "INFO replication" section 441 | # offers this information, which is used, among other tools, by 442 | # Redis Sentinel in order to discover slave instances. 
443 | # Another place where this info is available is in the output of the 444 | # "ROLE" command of a master. 445 | # 446 | # The listed IP and address normally reported by a slave is obtained 447 | # in the following way: 448 | # 449 | # IP: The address is auto detected by checking the peer address 450 | # of the socket used by the slave to connect with the master. 451 | # 452 | # Port: The port is communicated by the slave during the replication 453 | # handshake, and is normally the port that the slave is using to 454 | # listen for connections. 455 | # 456 | # However when port forwarding or Network Address Translation (NAT) is 457 | # used, the slave may be actually reachable via different IP and port 458 | # pairs. The following two options can be used by a slave in order to 459 | # report to its master a specific set of IP and port, so that both INFO 460 | # and ROLE will report those values. 461 | # 462 | # There is no need to use both the options if you need to override just 463 | # the port or the IP address. 464 | # 465 | # slave-announce-ip 5.5.5.5 466 | # slave-announce-port 1234 467 | 468 | ################################## SECURITY ################################### 469 | 470 | # Require clients to issue AUTH before processing any other 471 | # commands. This might be useful in environments in which you do not trust 472 | # others with access to the host running redis-server. 473 | # 474 | # This should stay commented out for backward compatibility and because most 475 | # people do not need auth (e.g. they run their own servers). 476 | # 477 | # Warning: since Redis is pretty fast an outside user can try up to 478 | # 150k passwords per second against a good box. This means that you should 479 | # use a very strong password otherwise it will be very easy to break. 480 | # 481 | # requirepass foobared 482 | 483 | # Command renaming. 484 | # 485 | # It is possible to change the name of dangerous commands in a shared 486 | # environment. 
For instance the CONFIG command may be renamed into something 487 | # hard to guess so that it will still be available for internal-use tools 488 | # but not available for general clients. 489 | # 490 | # Example: 491 | # 492 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 493 | # 494 | # It is also possible to completely kill a command by renaming it into 495 | # an empty string: 496 | # 497 | # rename-command CONFIG "" 498 | # 499 | # Please note that changing the name of commands that are logged into the 500 | # AOF file or transmitted to slaves may cause problems. 501 | 502 | ################################### LIMITS #################################### 503 | 504 | # Set the max number of connected clients at the same time. By default 505 | # this limit is set to 10000 clients, however if the Redis server is not 506 | # able to configure the process file limit to allow for the specified limit 507 | # the max number of allowed clients is set to the current file limit 508 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 509 | # 510 | # Once the limit is reached Redis will close all the new connections sending 511 | # an error 'max number of clients reached'. 512 | # 513 | # maxclients 10000 514 | 515 | # Don't use more memory than the specified amount of bytes. 516 | # When the memory limit is reached Redis will try to remove keys 517 | # according to the eviction policy selected (see maxmemory-policy). 518 | # 519 | # If Redis can't remove keys according to the policy, or if the policy is 520 | # set to 'noeviction', Redis will start to reply with errors to commands 521 | # that would use more memory, like SET, LPUSH, and so on, and will continue 522 | # to reply to read-only commands like GET. 523 | # 524 | # This option is usually useful when using Redis as an LRU cache, or to set 525 | # a hard memory limit for an instance (using the 'noeviction' policy). 
526 | # 527 | # WARNING: If you have slaves attached to an instance with maxmemory on, 528 | # the size of the output buffers needed to feed the slaves are subtracted 529 | # from the used memory count, so that network problems / resyncs will 530 | # not trigger a loop where keys are evicted, and in turn the output 531 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 532 | # of more keys, and so forth until the database is completely emptied. 533 | # 534 | # In short... if you have slaves attached it is suggested that you set a lower 535 | # limit for maxmemory so that there is some free RAM on the system for slave 536 | # output buffers (but this is not needed if the policy is 'noeviction'). 537 | # 538 | maxmemory 100m 539 | 540 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 541 | # is reached. You can select among six behaviors: 542 | # 543 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 544 | # allkeys-lru -> remove any key according to the LRU algorithm 545 | # volatile-random -> remove a random key with an expire set 546 | # allkeys-random -> remove a random key, any key 547 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 548 | # noeviction -> don't expire at all, just return an error on write operations 549 | # 550 | # Note: with any of the above policies, Redis will return an error on write 551 | # operations, when there are no suitable keys for eviction. 
552 | # 553 | # At the date of writing these commands are: set setnx setex append 554 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 555 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 556 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 557 | # getset mset msetnx exec sort 558 | # 559 | # The default is: 560 | # 561 | # maxmemory-policy noeviction 562 | 563 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 564 | # algorithms (in order to save memory), so you can tune it for speed or 565 | # accuracy. For default Redis will check five keys and pick the one that was 566 | # used less recently, you can change the sample size using the following 567 | # configuration directive. 568 | # 569 | # The default of 5 produces good enough results. 10 Approximates very closely 570 | # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. 571 | # 572 | # maxmemory-samples 5 573 | 574 | ############################## APPEND ONLY MODE ############################### 575 | 576 | # By default Redis asynchronously dumps the dataset on disk. This mode is 577 | # good enough in many applications, but an issue with the Redis process or 578 | # a power outage may result into a few minutes of writes lost (depending on 579 | # the configured save points). 580 | # 581 | # The Append Only File is an alternative persistence mode that provides 582 | # much better durability. For instance using the default data fsync policy 583 | # (see later in the config file) Redis can lose just one second of writes in a 584 | # dramatic event like a server power outage, or a single write if something 585 | # wrong with the Redis process itself happens, but the operating system is 586 | # still running correctly. 587 | # 588 | # AOF and RDB persistence can be enabled at the same time without problems. 
589 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 590 | # with the better durability guarantees. 591 | # 592 | # Please check http://redis.io/topics/persistence for more information. 593 | 594 | appendonly no 595 | 596 | # The name of the append only file (default: "appendonly.aof") 597 | 598 | appendfilename "appendonly.aof" 599 | 600 | # The fsync() call tells the Operating System to actually write data on disk 601 | # instead of waiting for more data in the output buffer. Some OS will really flush 602 | # data on disk, some other OS will just try to do it ASAP. 603 | # 604 | # Redis supports three different modes: 605 | # 606 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 607 | # always: fsync after every write to the append only log. Slow, Safest. 608 | # everysec: fsync only one time every second. Compromise. 609 | # 610 | # The default is "everysec", as that's usually the right compromise between 611 | # speed and data safety. It's up to you to understand if you can relax this to 612 | # "no" that will let the operating system flush the output buffer when 613 | # it wants, for better performances (but if you can live with the idea of 614 | # some data loss consider the default persistence mode that's snapshotting), 615 | # or on the contrary, use "always" that's very slow but a bit safer than 616 | # everysec. 617 | # 618 | # More details please check the following article: 619 | # http://antirez.com/post/redis-persistence-demystified.html 620 | # 621 | # If unsure, use "everysec". 622 | 623 | # appendfsync always 624 | appendfsync everysec 625 | # appendfsync no 626 | 627 | # When the AOF fsync policy is set to always or everysec, and a background 628 | # saving process (a background save or AOF log background rewriting) is 629 | # performing a lot of I/O against the disk, in some Linux configurations 630 | # Redis may block too long on the fsync() call. 
Note that there is no fix for 631 | # this currently, as even performing fsync in a different thread will block 632 | # our synchronous write(2) call. 633 | # 634 | # In order to mitigate this problem it's possible to use the following option 635 | # that will prevent fsync() from being called in the main process while a 636 | # BGSAVE or BGREWRITEAOF is in progress. 637 | # 638 | # This means that while another child is saving, the durability of Redis is 639 | # the same as "appendfsync none". In practical terms, this means that it is 640 | # possible to lose up to 30 seconds of log in the worst scenario (with the 641 | # default Linux settings). 642 | # 643 | # If you have latency problems turn this to "yes". Otherwise leave it as 644 | # "no" that is the safest pick from the point of view of durability. 645 | 646 | no-appendfsync-on-rewrite no 647 | 648 | # Automatic rewrite of the append only file. 649 | # Redis is able to automatically rewrite the log file implicitly calling 650 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 651 | # 652 | # This is how it works: Redis remembers the size of the AOF file after the 653 | # latest rewrite (if no rewrite has happened since the restart, the size of 654 | # the AOF at startup is used). 655 | # 656 | # This base size is compared to the current size. If the current size is 657 | # bigger than the specified percentage, the rewrite is triggered. Also 658 | # you need to specify a minimal size for the AOF file to be rewritten, this 659 | # is useful to avoid rewriting the AOF file even if the percentage increase 660 | # is reached but it is still pretty small. 661 | # 662 | # Specify a percentage of zero in order to disable the automatic AOF 663 | # rewrite feature. 
664 | 665 | auto-aof-rewrite-percentage 100 666 | auto-aof-rewrite-min-size 64mb 667 | 668 | # An AOF file may be found to be truncated at the end during the Redis 669 | # startup process, when the AOF data gets loaded back into memory. 670 | # This may happen when the system where Redis is running 671 | # crashes, especially when an ext4 filesystem is mounted without the 672 | # data=ordered option (however this can't happen when Redis itself 673 | # crashes or aborts but the operating system still works correctly). 674 | # 675 | # Redis can either exit with an error when this happens, or load as much 676 | # data as possible (the default now) and start if the AOF file is found 677 | # to be truncated at the end. The following option controls this behavior. 678 | # 679 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 680 | # the Redis server starts emitting a log to inform the user of the event. 681 | # Otherwise if the option is set to no, the server aborts with an error 682 | # and refuses to start. When the option is set to no, the user requires 683 | # to fix the AOF file using the "redis-check-aof" utility before to restart 684 | # the server. 685 | # 686 | # Note that if the AOF file will be found to be corrupted in the middle 687 | # the server will still exit with an error. This option only applies when 688 | # Redis will try to read more data from the AOF file but not enough bytes 689 | # will be found. 690 | aof-load-truncated yes 691 | 692 | ################################ LUA SCRIPTING ############################### 693 | 694 | # Max execution time of a Lua script in milliseconds. 695 | # 696 | # If the maximum execution time is reached Redis will log that a script is 697 | # still in execution after the maximum allowed time and will start to 698 | # reply to queries with an error. 
699 | # 700 | # When a long running script exceeds the maximum execution time only the 701 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 702 | # used to stop a script that did not yet called write commands. The second 703 | # is the only way to shut down the server in the case a write command was 704 | # already issued by the script but the user doesn't want to wait for the natural 705 | # termination of the script. 706 | # 707 | # Set it to 0 or a negative value for unlimited execution without warnings. 708 | lua-time-limit 5000 709 | 710 | ################################ REDIS CLUSTER ############################### 711 | # 712 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 713 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 714 | # in order to mark it as "mature" we need to wait for a non trivial percentage 715 | # of users to deploy it in production. 716 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 717 | # 718 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 719 | # started as cluster nodes can. In order to start a Redis instance as a 720 | # cluster node enable the cluster support uncommenting the following: 721 | # 722 | # cluster-enabled yes 723 | 724 | # Every cluster node has a cluster configuration file. This file is not 725 | # intended to be edited by hand. It is created and updated by Redis nodes. 726 | # Every Redis Cluster node requires a different cluster configuration file. 727 | # Make sure that instances running in the same system do not have 728 | # overlapping cluster configuration file names. 729 | # 730 | # cluster-config-file nodes-6379.conf 731 | 732 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 733 | # for it to be considered in failure state. 734 | # Most other internal time limits are multiple of the node timeout. 
735 | # 736 | # cluster-node-timeout 15000 737 | 738 | # A slave of a failing master will avoid to start a failover if its data 739 | # looks too old. 740 | # 741 | # There is no simple way for a slave to actually have an exact measure of 742 | # its "data age", so the following two checks are performed: 743 | # 744 | # 1) If there are multiple slaves able to failover, they exchange messages 745 | # in order to try to give an advantage to the slave with the best 746 | # replication offset (more data from the master processed). 747 | # Slaves will try to get their rank by offset, and apply to the start 748 | # of the failover a delay proportional to their rank. 749 | # 750 | # 2) Every single slave computes the time of the last interaction with 751 | # its master. This can be the last ping or command received (if the master 752 | # is still in the "connected" state), or the time that elapsed since the 753 | # disconnection with the master (if the replication link is currently down). 754 | # If the last interaction is too old, the slave will not try to failover 755 | # at all. 756 | # 757 | # The point "2" can be tuned by user. Specifically a slave will not perform 758 | # the failover if, since the last interaction with the master, the time 759 | # elapsed is greater than: 760 | # 761 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 762 | # 763 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 764 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 765 | # slave will not try to failover if it was not able to talk with the master 766 | # for longer than 310 seconds. 767 | # 768 | # A large slave-validity-factor may allow slaves with too old data to failover 769 | # a master, while a too small value may prevent the cluster from being able to 770 | # elect a slave at all. 
771 | # 772 | # For maximum availability, it is possible to set the slave-validity-factor 773 | # to a value of 0, which means, that slaves will always try to failover the 774 | # master regardless of the last time they interacted with the master. 775 | # (However they'll always try to apply a delay proportional to their 776 | # offset rank). 777 | # 778 | # Zero is the only value able to guarantee that when all the partitions heal 779 | # the cluster will always be able to continue. 780 | # 781 | # cluster-slave-validity-factor 10 782 | 783 | # Cluster slaves are able to migrate to orphaned masters, that are masters 784 | # that are left without working slaves. This improves the cluster ability 785 | # to resist to failures as otherwise an orphaned master can't be failed over 786 | # in case of failure if it has no working slaves. 787 | # 788 | # Slaves migrate to orphaned masters only if there are still at least a 789 | # given number of other working slaves for their old master. This number 790 | # is the "migration barrier". A migration barrier of 1 means that a slave 791 | # will migrate only if there is at least 1 other working slave for its master 792 | # and so forth. It usually reflects the number of slaves you want for every 793 | # master in your cluster. 794 | # 795 | # Default is 1 (slaves migrate only if their masters remain with at least 796 | # one slave). To disable migration just set it to a very large value. 797 | # A value of 0 can be set but is useful only for debugging and dangerous 798 | # in production. 799 | # 800 | # cluster-migration-barrier 1 801 | 802 | # By default Redis Cluster nodes stop accepting queries if they detect there 803 | # is at least an hash slot uncovered (no available node is serving it). 804 | # This way if the cluster is partially down (for example a range of hash slots 805 | # are no longer covered) all the cluster becomes, eventually, unavailable. 
806 | # It automatically returns available as soon as all the slots are covered again. 807 | # 808 | # However sometimes you want the subset of the cluster which is working, 809 | # to continue to accept queries for the part of the key space that is still 810 | # covered. In order to do so, just set the cluster-require-full-coverage 811 | # option to no. 812 | # 813 | # cluster-require-full-coverage yes 814 | 815 | # In order to setup your cluster make sure to read the documentation 816 | # available at http://redis.io web site. 817 | 818 | ################################## SLOW LOG ################################### 819 | 820 | # The Redis Slow Log is a system to log queries that exceeded a specified 821 | # execution time. The execution time does not include the I/O operations 822 | # like talking with the client, sending the reply and so forth, 823 | # but just the time needed to actually execute the command (this is the only 824 | # stage of command execution where the thread is blocked and can not serve 825 | # other requests in the meantime). 826 | # 827 | # You can configure the slow log with two parameters: one tells Redis 828 | # what is the execution time, in microseconds, to exceed in order for the 829 | # command to get logged, and the other parameter is the length of the 830 | # slow log. When a new command is logged the oldest one is removed from the 831 | # queue of logged commands. 832 | 833 | # The following time is expressed in microseconds, so 1000000 is equivalent 834 | # to one second. Note that a negative number disables the slow log, while 835 | # a value of zero forces the logging of every command. 836 | slowlog-log-slower-than 10000 837 | 838 | # There is no limit to this length. Just be aware that it will consume memory. 839 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 
840 | slowlog-max-len 128 841 | 842 | ################################ LATENCY MONITOR ############################## 843 | 844 | # The Redis latency monitoring subsystem samples different operations 845 | # at runtime in order to collect data related to possible sources of 846 | # latency of a Redis instance. 847 | # 848 | # Via the LATENCY command this information is available to the user that can 849 | # print graphs and obtain reports. 850 | # 851 | # The system only logs operations that were performed in a time equal or 852 | # greater than the amount of milliseconds specified via the 853 | # latency-monitor-threshold configuration directive. When its value is set 854 | # to zero, the latency monitor is turned off. 855 | # 856 | # By default latency monitoring is disabled since it is mostly not needed 857 | # if you don't have latency issues, and collecting data has a performance 858 | # impact, that while very small, can be measured under big load. Latency 859 | # monitoring can easily be enabled at runtime using the command 860 | # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. 861 | latency-monitor-threshold 0 862 | 863 | ############################# EVENT NOTIFICATION ############################## 864 | 865 | # Redis can notify Pub/Sub clients about events happening in the key space. 866 | # This feature is documented at http://redis.io/topics/notifications 867 | # 868 | # For instance if keyspace events notification is enabled, and a client 869 | # performs a DEL operation on key "foo" stored in the Database 0, two 870 | # messages will be published via Pub/Sub: 871 | # 872 | # PUBLISH __keyspace@0__:foo del 873 | # PUBLISH __keyevent@0__:del foo 874 | # 875 | # It is possible to select the events that Redis will notify among a set 876 | # of classes. Every class is identified by a single character: 877 | # 878 | # K Keyspace events, published with __keyspace@<db>__ prefix. 879 | # E Keyevent events, published with __keyevent@<db>__ prefix. 
880 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 881 | # $ String commands 882 | # l List commands 883 | # s Set commands 884 | # h Hash commands 885 | # z Sorted set commands 886 | # x Expired events (events generated every time a key expires) 887 | # e Evicted events (events generated when a key is evicted for maxmemory) 888 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 889 | # 890 | # The "notify-keyspace-events" takes as argument a string that is composed 891 | # of zero or multiple characters. The empty string means that notifications 892 | # are disabled. 893 | # 894 | # Example: to enable list and generic events, from the point of view of the 895 | # event name, use: 896 | # 897 | # notify-keyspace-events Elg 898 | # 899 | # Example 2: to get the stream of the expired keys subscribing to channel 900 | # name __keyevent@0__:expired use: 901 | # 902 | # notify-keyspace-events Ex 903 | # 904 | # By default all notifications are disabled because most users don't need 905 | # this feature and the feature has some overhead. Note that if you don't 906 | # specify at least one of K or E, no events will be delivered. 907 | notify-keyspace-events "" 908 | 909 | ############################### ADVANCED CONFIG ############################### 910 | 911 | # Hashes are encoded using a memory efficient data structure when they have a 912 | # small number of entries, and the biggest entry does not exceed a given 913 | # threshold. These thresholds can be configured using the following directives. 914 | hash-max-ziplist-entries 512 915 | hash-max-ziplist-value 64 916 | 917 | # Lists are also encoded in a special way to save a lot of space. 918 | # The number of entries allowed per internal list node can be specified 919 | # as a fixed maximum size or a maximum number of elements. 
920 | # For a fixed maximum size, use -5 through -1, meaning: 921 | # -5: max size: 64 Kb <-- not recommended for normal workloads 922 | # -4: max size: 32 Kb <-- not recommended 923 | # -3: max size: 16 Kb <-- probably not recommended 924 | # -2: max size: 8 Kb <-- good 925 | # -1: max size: 4 Kb <-- good 926 | # Positive numbers mean store up to _exactly_ that number of elements 927 | # per list node. 928 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 929 | # but if your use case is unique, adjust the settings as necessary. 930 | list-max-ziplist-size -2 931 | 932 | # Lists may also be compressed. 933 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 934 | # the list to *exclude* from compression. The head and tail of the list 935 | # are always uncompressed for fast push/pop operations. Settings are: 936 | # 0: disable all list compression 937 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 938 | # going from either the head or tail" 939 | # So: [head]->node->node->...->node->[tail] 940 | # [head], [tail] will always be uncompressed; inner nodes will compress. 941 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 942 | # 2 here means: don't compress head or head->next or tail->prev or tail, 943 | # but compress all nodes between them. 944 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 945 | # etc. 946 | list-compress-depth 0 947 | 948 | # Sets have a special encoding in just one case: when a set is composed 949 | # of just strings that happen to be integers in radix 10 in the range 950 | # of 64 bit signed integers. 951 | # The following configuration setting sets the limit in the size of the 952 | # set in order to use this special memory saving encoding. 953 | set-max-intset-entries 512 954 | 955 | # Similarly to hashes and lists, sorted sets are also specially encoded in 956 | # order to save a lot of space. 
This encoding is only used when the length and 957 | # elements of a sorted set are below the following limits: 958 | zset-max-ziplist-entries 128 959 | zset-max-ziplist-value 64 960 | 961 | # HyperLogLog sparse representation bytes limit. The limit includes the 962 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 963 | # this limit, it is converted into the dense representation. 964 | # 965 | # A value greater than 16000 is totally useless, since at that point the 966 | # dense representation is more memory efficient. 967 | # 968 | # The suggested value is ~ 3000 in order to have the benefits of 969 | # the space efficient encoding without slowing down too much PFADD, 970 | # which is O(N) with the sparse encoding. The value can be raised to 971 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 972 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 973 | hll-sparse-max-bytes 3000 974 | 975 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 976 | # order to help rehashing the main Redis hash table (the one mapping top-level 977 | # keys to values). The hash table implementation Redis uses (see dict.c) 978 | # performs a lazy rehashing: the more operation you run into a hash table 979 | # that is rehashing, the more rehashing "steps" are performed, so if the 980 | # server is idle the rehashing is never complete and some more memory is used 981 | # by the hash table. 982 | # 983 | # The default is to use this millisecond 10 times every second in order to 984 | # actively rehash the main dictionaries, freeing memory when possible. 985 | # 986 | # If unsure: 987 | # use "activerehashing no" if you have hard latency requirements and it is 988 | # not a good thing in your environment that Redis can reply from time to time 989 | # to queries with 2 milliseconds delay. 
990 | # 991 | # use "activerehashing yes" if you don't have such hard requirements but 992 | # want to free memory asap when possible. 993 | activerehashing yes 994 | 995 | # The client output buffer limits can be used to force disconnection of clients 996 | # that are not reading data from the server fast enough for some reason (a 997 | # common reason is that a Pub/Sub client can't consume messages as fast as the 998 | # publisher can produce them). 999 | # 1000 | # The limit can be set differently for the three different classes of clients: 1001 | # 1002 | # normal -> normal clients including MONITOR clients 1003 | # slave -> slave clients 1004 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 1005 | # 1006 | # The syntax of every client-output-buffer-limit directive is the following: 1007 | # 1008 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> 1009 | # 1010 | # A client is immediately disconnected once the hard limit is reached, or if 1011 | # the soft limit is reached and remains reached for the specified number of 1012 | # seconds (continuously). 1013 | # So for instance if the hard limit is 32 megabytes and the soft limit is 1014 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 1015 | # if the size of the output buffers reach 32 megabytes, but will also get 1016 | # disconnected if the client reaches 16 megabytes and continuously overcomes 1017 | # the limit for 10 seconds. 1018 | # 1019 | # By default normal clients are not limited because they don't receive data 1020 | # without asking (in a push way), but just after a request, so only 1021 | # asynchronous clients may create a scenario where data is requested faster 1022 | # than it can read. 1023 | # 1024 | # Instead there is a default limit for pubsub and slave clients, since 1025 | # subscribers and slaves receive data in a push fashion. 1026 | # 1027 | # Both the hard or the soft limit can be disabled by setting them to zero. 
1028 | client-output-buffer-limit normal 0 0 0 1029 | client-output-buffer-limit slave 256mb 64mb 60 1030 | client-output-buffer-limit pubsub 32mb 8mb 60 1031 | 1032 | # Redis calls an internal function to perform many background tasks, like 1033 | # closing connections of clients in timeout, purging expired keys that are 1034 | # never requested, and so forth. 1035 | # 1036 | # Not all tasks are performed with the same frequency, but Redis checks for 1037 | # tasks to perform according to the specified "hz" value. 1038 | # 1039 | # By default "hz" is set to 10. Raising the value will use more CPU when 1040 | # Redis is idle, but at the same time will make Redis more responsive when 1041 | # there are many keys expiring at the same time, and timeouts may be 1042 | # handled with more precision. 1043 | # 1044 | # The range is between 1 and 500, however a value over 100 is usually not 1045 | # a good idea. Most users should use the default of 10 and raise this up to 1046 | # 100 only in environments where very low latency is required. 1047 | hz 10 1048 | 1049 | # When a child rewrites the AOF file, if the following option is enabled 1050 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1051 | # in order to commit the file to the disk more incrementally and avoid 1052 | # big latency spikes. 
1053 | aof-rewrite-incremental-fsync yes 1054 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Base Image 2 | FROM python:3.6.12-alpine3.11 3 | 4 | # Install PATH 5 | RUN mkdir /opt/opsant && mkdir /etc/supervisord.d 6 | 7 | # Add File 8 | ADD opsant-backend /opt/opsant-backend 9 | 10 | # Install 11 | RUN cd /etc/apk/keys/ && wget 'http://openresty.org/package/admin@openresty.com-5ea678a6.rsa.pub' \ 12 | && sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories \ 13 | && echo "http://openresty.org/package/alpine/v3.11/main" >> /etc/apk/repositories && apk update \ 14 | && apk add --no-cache gcc g++ make libffi-dev openssl-dev zlib-dev jpeg-dev mariadb-dev openssh-client openresty \ 15 | && pip --no-cache-dir install supervisor -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com \ 16 | && pip --no-cache-dir install -r /opt/opsant-backend/requirements.txt -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com 17 | 18 | #Supervisord config 19 | ADD supervisord.conf /etc/supervisord.conf 20 | ADD websocket.ini /etc/supervisord.d/websocket.ini 21 | ADD opsant.ini /etc/supervisord.d/opsant.ini 22 | ADD openresty.ini /etc/supervisord.d/openresty.ini 23 | 24 | # Outside Port 25 | EXPOSE 80 26 | EXPOSE 443 27 | 28 | #supervisord start 29 | CMD ["supervisord", "-c", "/etc/supervisord.conf"] -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | # 使用说明 2 | 3 | ## 构建镜像 4 | 5 | docker build -t opsany/opsant:v1.0.0 . 
6 | 7 | 8 | -------------------------------------------------------------------------------- /docker/openresty.ini: -------------------------------------------------------------------------------- 1 | [program:openresty] 2 | command = /usr/local/openresty/nginx/sbin/nginx 3 | startsecs = 0 4 | stopwaitsecs = 0 5 | autostart = true 6 | autorestart = true 7 | redirect_stderr = true 8 | stdout_logfile = /opt/opsant/logs/openresty.log 9 | -------------------------------------------------------------------------------- /docker/opsant.ini: -------------------------------------------------------------------------------- 1 | [program:opsant] 2 | command = gunicorn wsgi --bind 127.0.0.1:8001 -k gevent -w 2 -n opsant-web --access-logfile - --error-logfile - 3 | directory = /opt/opsant-backend 4 | environment = DJANGO_SETTINGS_MODULE=settings,OPS_ANT_ENV="production",PAAS_LOGGING_DIR="/opt/opsant/logs" 5 | startsecs = 0 6 | stopwaitsecs = 0 7 | autostart = true 8 | autorestart = true 9 | redirect_stderr = true 10 | stdout_logfile = /opt/opsant/logs/opsant.log 11 | 12 | 13 | [program:opsant_celery] 14 | command = python3 /opt/opsant-backend/manage.py celery worker -n opsant-celery -l INFO --concurrency=2 15 | directory = /opt/opsant-backend 16 | environment = DJANGO_SETTINGS_MODULE=settings,OPS_ANT_ENV="production",PAAS_LOGGING_DIR="/opt/opsant/logs" 17 | startsecs = 0 18 | stopwaitsecs = 0 19 | autostart = true 20 | autorestart = true 21 | redirect_stderr = true 22 | stdout_logfile = /opt/opsant/logs/opsant_celery.log 23 | 24 | 25 | [program:opsant_beat] 26 | command = python3 /opt/opsant-backend/manage.py celery beat 27 | directory = /opt/opsant-backend 28 | environment = DJANGO_SETTINGS_MODULE=settings,OPS_ANT_ENV="production",PAAS_LOGGING_DIR="/opt/opsant/logs" 29 | startsecs = 0 30 | stopwaitsecs = 0 31 | autostart = true 32 | autorestart = true 33 | redirect_stderr = true 34 | stdout_logfile = /opt/opsant/logs/opsant_beat.log 35 | 
-------------------------------------------------------------------------------- /docker/supervisord.conf: -------------------------------------------------------------------------------- 1 | ; Sample supervisor config file. 2 | 3 | [unix_http_server] 4 | file=/var/run/supervisor.sock ; (the path to the socket file) 5 | ;chmod=0700 ; sockef file mode (default 0700) 6 | ;chown=nobody:nogroup ; socket file uid:gid owner 7 | ;username=user ; (default is no username (open server)) 8 | ;password=123 ; (default is no password (open server)) 9 | 10 | ;[inet_http_server] ; inet (TCP) server disabled by default 11 | ;port=127.0.0.1:9001 ; (ip_address:port specifier, *:port for all iface) 12 | ;username=user ; (default is no username (open server)) 13 | ;password=123 ; (default is no password (open server)) 14 | 15 | [supervisord] 16 | logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log) 17 | logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB) 18 | logfile_backups=10 ; (num of main logfile rotation backups;default 10) 19 | loglevel=info ; (log level;default info; others: debug,warn,trace) 20 | pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid) 21 | nodaemon=true ; (start in foreground if true;default false) 22 | minfds=1024 ; (min. avail startup file descriptors;default 1024) 23 | minprocs=200 ; (min. avail process descriptors;default 200) 24 | ;umask=022 ; (process file creation umask;default 022) 25 | ;user=chrism ; (default is current user, required if root) 26 | ;identifier=supervisor ; (supervisord identifier, default is 'supervisor') 27 | ;directory=/tmp ; (default is not to cd during start) 28 | ;nocleanup=true ; (don't clean up tempfiles at start;default false) 29 | ;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP) 30 | ;environment=KEY=value ; (key value pairs to add to environment) 31 | ;strip_ansi=false ; (strip ansi escape codes in logs; def. 
false) 32 | 33 | ; the below section must remain in the config file for RPC 34 | ; (supervisorctl/web interface) to work, additional interfaces may be 35 | ; added by defining them in separate rpcinterface: sections 36 | [rpcinterface:supervisor] 37 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 38 | 39 | [supervisorctl] 40 | serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket 41 | ;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket 42 | ;username=chris ; should be same as http_username if set 43 | ;password=123 ; should be same as http_password if set 44 | ;prompt=mysupervisor ; cmd line prompt (default "supervisor") 45 | ;history_file=~/.sc_history ; use readline history if available 46 | 47 | ; The below sample program section shows all possible program subsection values, 48 | ; create one or more 'real' program: sections to be able to control them under 49 | ; supervisor. 50 | 51 | ;[program:theprogramname] 52 | ;command=/bin/cat ; the program (relative uses PATH, can take args) 53 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s) 54 | ;numprocs=1 ; number of processes copies to start (def 1) 55 | ;directory=/tmp ; directory to cwd to before exec (def no cwd) 56 | ;umask=022 ; umask for process (default None) 57 | ;priority=999 ; the relative start priority (default 999) 58 | ;autostart=true ; start at supervisord start (default: true) 59 | ;autorestart=true ; retstart at unexpected quit (default: true) 60 | ;startsecs=10 ; number of secs prog must stay running (def. 
1) 61 | ;startretries=3 ; max # of serial start failures (default 3) 62 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2) 63 | ;stopsignal=QUIT ; signal used to kill process (default TERM) 64 | ;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) 65 | ;user=chrism ; setuid to this UNIX account to run the program 66 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false) 67 | ;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO 68 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 69 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10) 70 | ;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 71 | ;stdout_events_enabled=false ; emit events on stdout writes (default false) 72 | ;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO 73 | ;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 74 | ;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10) 75 | ;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) 76 | ;stderr_events_enabled=false ; emit events on stderr writes (default false) 77 | ;environment=A=1,B=2 ; process environment additions (def no adds) 78 | ;serverurl=AUTO ; override serverurl computation (childutils) 79 | 80 | ; The below sample eventlistener section shows all possible 81 | ; eventlistener subsection values, create one or more 'real' 82 | ; eventlistener: sections to be able to handle event notifications 83 | ; sent by supervisor. 84 | 85 | ;[eventlistener:theeventlistenername] 86 | ;command=/bin/eventlistener ; the program (relative uses PATH, can take args) 87 | ;process_name=%(program_name)s ; process_name expr (default %(program_name)s) 88 | ;numprocs=1 ; number of processes copies to start (def 1) 89 | ;events=EVENT ; event notif. 
types to subscribe to (req'd) 90 | ;buffer_size=10 ; event buffer queue size (default 10) 91 | ;directory=/tmp ; directory to cwd to before exec (def no cwd) 92 | ;umask=022 ; umask for process (default None) 93 | ;priority=-1 ; the relative start priority (default -1) 94 | ;autostart=true ; start at supervisord start (default: true) 95 | ;autorestart=unexpected ; restart at unexpected quit (default: unexpected) 96 | ;startsecs=10 ; number of secs prog must stay running (def. 1) 97 | ;startretries=3 ; max # of serial start failures (default 3) 98 | ;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2) 99 | ;stopsignal=QUIT ; signal used to kill process (default TERM) 100 | ;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10) 101 | ;user=chrism ; setuid to this UNIX account to run the program 102 | ;redirect_stderr=true ; redirect proc stderr to stdout (default false) 103 | ;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO 104 | ;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 105 | ;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10) 106 | ;stdout_events_enabled=false ; emit events on stdout writes (default false) 107 | ;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO 108 | ;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB) 109 | ;stderr_logfile_backups ; # of stderr logfile backups (default 10) 110 | ;stderr_events_enabled=false ; emit events on stderr writes (default false) 111 | ;environment=A=1,B=2 ; process environment additions 112 | ;serverurl=AUTO ; override serverurl computation (childutils) 113 | 114 | ; The below sample group section shows all possible group values, 115 | ; create one or more 'real' group: sections to create "heterogeneous" 116 | ; process groups. 
117 | 118 | ;[group:thegroupname] 119 | ;programs=progname1,progname2 ; each refers to 'x' in [program:x] definitions 120 | ;priority=999 ; the relative start priority (default 999) 121 | 122 | ; The [include] section can just contain the "files" setting. This 123 | ; setting can list multiple files (separated by whitespace or 124 | ; newlines). It can also contain wildcards. The filenames are 125 | ; interpreted as relative to this file. Included files *cannot* 126 | ; include files themselves. 127 | 128 | [include] 129 | files = supervisord.d/*.ini 130 | -------------------------------------------------------------------------------- /docker/websocket.ini: -------------------------------------------------------------------------------- 1 | [program:websocket] 2 | command = daphne --proxy-headers -b 127.0.0.1 -p 8002 asgi:application 3 | directory = /opt/opsant-backend 4 | environment = OPS_ANT_ENV="production",BK_LOG_DIR="/opt/opsant/logs" 5 | startsecs = 0 6 | stopwaitsecs = 0 7 | autostart = true 8 | autorestart = true 9 | redirect_stderr = true 10 | stdout_logfile = /opt/opsant/logs/websocket.log 11 | -------------------------------------------------------------------------------- /generate-ssl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # create self-signed server certificate: 4 | 5 | # read -p "Enter your domain [www.example.com]: " DOMAIN_NAME 6 | #Config 7 | source ./install.config 8 | 9 | # create dir for ssl 10 | if [ ! -d ./conf/nginx-conf.d/ssl ];then 11 | mkdir -p ./conf/nginx-conf.d/ssl 12 | fi 13 | 14 | cp ./conf/openssl.cnf ./conf/nginx-conf.d/ssl/ 15 | cd ./conf/nginx-conf.d/ssl 16 | 17 | echo "Create server key..." 18 | 19 | openssl genrsa -des3 -passout pass:opsany -out $DOMAIN_NAME.key 2048 >/dev/null 2>&1 20 | 21 | echo "Create server certificate signing request..." 
22 | 23 | SUBJECT="/C=CN/ST=BeiJing/L=BeiJing/O=BeiJing/OU=OpsAny/CN=OpsAny" 24 | 25 | openssl req -new -passin pass:opsany -subj $SUBJECT -key $DOMAIN_NAME.key -out $DOMAIN_NAME.csr >/dev/null 2>&1 26 | 27 | echo "Remove password..." 28 | 29 | mv $DOMAIN_NAME.key $DOMAIN_NAME.origin.key 30 | openssl rsa -passin pass:opsany -in $DOMAIN_NAME.origin.key -out $DOMAIN_NAME.key >/dev/null 2>&1 31 | 32 | echo "Sign SSL certificate..." 33 | 34 | openssl x509 -req -days 3650 -extfile openssl.cnf -extensions 'v3_req' -in $DOMAIN_NAME.csr -signkey $DOMAIN_NAME.key -out $DOMAIN_NAME.crt >/dev/null 2>&1 35 | 36 | openssl x509 -in ${DOMAIN_NAME}.crt -out ${DOMAIN_NAME}.pem -outform PEM >/dev/null 2>&1 37 | 38 | mv ${DOMAIN_NAME}.pem ${DOMAIN_NAME}.origin.pem 39 | 40 | cat ${DOMAIN_NAME}.key ${DOMAIN_NAME}.origin.pem > ${DOMAIN_NAME}.pem 41 | 42 | rm -f ./openssl.cnf 43 | -------------------------------------------------------------------------------- /install.config.example: -------------------------------------------------------------------------------- 1 | # OpsAnt VERSION 2 | OPSANT_VERSION="1.0.0" 3 | 4 | # Network 5 | LOCAL_IP="192.168.56.11" 6 | DOMAIN_NAME="192.168.56.11" 7 | 8 | # PAAS Service 9 | PAAS_DOCKER_REG="hub.docker.com" 10 | 11 | # MySQL 12 | MYSQL_SERVER_IP="${LOCAL_IP}" 13 | MYSQL_ROOT_PASSWORD="OpsAnt@2021" 14 | MYSQL_OPSANT_PASSWORD="OpsAnt@2021" 15 | 16 | # Redis 17 | REDIS_SERVER_IP="${LOCAL_IP}" 18 | REDIS_SERVER_PASSWORD="OpsAnt2021" 19 | 20 | #Guacd 21 | GUACD_HOST="${LOCAL_IP}" 22 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #****************************************** 3 | # Author: Jason Zhao 4 | # Email: zhaoshundong@opsant.com 5 | # Organization: https://www.opsany.com/ 6 | # Description: OpsAnt Install Script 7 | #****************************************** 8 | 9 | #Data/Time 10 | CTIME=$(date 
"+%Y-%m-%d-%H-%M") 11 | 12 | #Shell ENV 13 | SHELL_NAME="install.sh" 14 | SHELL_LOG="${SHELL_NAME}.log" 15 | INSTALL_PATH="/opt/opsant" 16 | 17 | #Check Config 18 | if [ ! -f ./install.config ];then 19 | echo "Please Copy install.config and Change: cp install.config.example install.config" 20 | exit 21 | else 22 | source ./install.config 23 | fi 24 | 25 | # generate ssl certs 26 | ssl_make(){ 27 | base_dir=$(pwd) 28 | source ./generate-ssl.sh 29 | cd $base_dir 30 | } 31 | # Record Shell log 32 | shell_log(){ 33 | LOG_INFO=$1 34 | echo "----------------$CTIME ${SHELL_NAME} : ${LOG_INFO}----------------" 35 | echo "$CTIME ${SHELL_NAME} : ${LOG_INFO}" >> ${SHELL_LOG} 36 | } 37 | 38 | 39 | # Check Install requirement 40 | install_check(){ 41 | DOCKER_PID=$(ps aux | grep '/usr/bin/containerd' | grep -v 'grep' | wc -l) 42 | if [ ${DOCKER_PID} -lt 1 ];then 43 | echo "Please install and start docker first!!!" 44 | exit 45 | fi 46 | } 47 | 48 | # Install Init 49 | opsant_init(){ 50 | shell_log "Start: Install Init" 51 | mkdir -p ${INSTALL_PATH}/{uploads/guacamole,logs,redis-volume,mongodb-volume,mysql-volume} 52 | /bin/cp -r conf ${INSTALL_PATH}/ 53 | #/bin/cp -r agent ${INSTALL_PATH}/uploads/ 54 | shell_log "End: Install Init" 55 | } 56 | 57 | # Share Service Start 58 | opsant_install(){ 59 | # Redis 60 | shell_log "======启动Redis======" 61 | sed -i "s/REDIS_SERVER_PASSWORD/${REDIS_SERVER_PASSWORD}/g" ${INSTALL_PATH}/conf/redis.conf 62 | docker run -d --restart=always --name opsant-redis \ 63 | -p 6379:6379 -v ${INSTALL_PATH}/redis-volume:/data \ 64 | -v ${INSTALL_PATH}/conf/redis.conf:/data/redis.conf \ 65 | opsany/redis:6.0.9-alpine redis-server /data/redis.conf 66 | 67 | # MySQL 68 | shell_log "======启动MySQL======" 69 | docker run -d --restart=always --name opsant-mysql \ 70 | -e MYSQL_ROOT_PASSWORD="$MYSQL_ROOT_PASSWORD" \ 71 | -p 3306:3306 -v ${INSTALL_PATH}/mysql-volume:/var/lib/mysql \ 72 | -v ${INSTALL_PATH}/conf/mysqld.cnf:/etc/mysql/mysql.conf.d/mysqld.cnf \ 73 | 
-v ${INSTALL_PATH}/logs:/var/log/mysql \ 74 | opsany/mysql:5.6.50 --character-set-server=utf8 --collation-server=utf8_general_ci 75 | 76 | # Guacd 77 | shell_log "======启动Guacd======" 78 | docker run -d --restart=always --name opsant-guacd \ 79 | -p 4822:4822 \ 80 | -v ${INSTALL_PATH}/uploads/guacamole:/srv/guacamole \ 81 | opsany/guacd:1.2.0 82 | } 83 | 84 | # MySQL init 85 | mysql_init(){ 86 | shell_log "======进行MySQL数据初始化======" 87 | sleep 20 88 | export MYSQL_PWD=${MYSQL_ROOT_PASSWORD} 89 | mysql -h "${LOCAL_IP}" -u root -e "CREATE DATABASE IF NOT EXISTS opsant DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;" 90 | mysql -h "${LOCAL_IP}" -u root -e "grant all on opsant.* to opsant@'%' identified by "\"${MYSQL_OPSANT_PASSWORD}\"";" 91 | #mysql -h "${LOCAL_IP}" -u root opsant < opsant.sql 92 | } 93 | 94 | # Config 95 | opsant_config(){ 96 | shell_log "======进行OpsAnt配置修改======" 97 | # PaaS Config 98 | sed -i "s/MYSQL_SERVER_IP/${MYSQL_SERVER_IP}/g" ${INSTALL_PATH}/conf/prod.py 99 | sed -i "s/MYSQL_OPSANT_PASSWORD/${MYSQL_OPSANT_PASSWORD}/g" ${INSTALL_PATH}/conf/prod.py 100 | sed -i "s/REDIS_SERVER_IP/${REDIS_SERVER_IP}/g" ${INSTALL_PATH}/conf/prod.py 101 | sed -i "s/REDIS_SERVER_PASSWORD/${REDIS_SERVER_PASSWORD}/g" ${INSTALL_PATH}/conf/prod.py 102 | sed -i "s/GUACD_SERVER_IP/${GUACD_HOST}/g" ${INSTALL_PATH}/conf/prod.py 103 | 104 | # OpenResty 105 | sed -i "s/DOMAIN_NAME/${DOMAIN_NAME}/g" ${INSTALL_PATH}/conf/nginx-conf.d/opsant.conf 106 | sed -i "s/LOCAL_IP/${LOCAL_IP}/g" ${INSTALL_PATH}/conf/nginx-conf.d/opsant.conf 107 | } 108 | 109 | opsant_start(){ 110 | shell_log "======启动OpsAnt服务======" 111 | docker run -d --restart=always --name opsant-web \ 112 | -p 80:80 -p 443:443 -v ${INSTALL_PATH}/logs:/opt/opsant/logs \ 113 | -v ${INSTALL_PATH}/conf/nginx-conf.d:/usr/local/openresty/nginx/conf.d \ 114 | -v ${INSTALL_PATH}/conf/nginx.conf:/usr/local/openresty/nginx/conf/nginx.conf \ 115 | -v ${INSTALL_PATH}/conf/prod.py:/opt/opsant-backend/config/prod.py \ 116 
| -v ${INSTALL_PATH}/uploads:/opt/opsant/uploads \ 117 | opsany/opsant:${OPSANT_VERSION} 118 | } 119 | 120 | opsant_set(){ 121 | shell_log "======初始化数据库和admin用户密码======" 122 | sleep 20 123 | ADMIN_PASSWD=$(openssl rand -base64 8) 124 | docker exec -e OPS_ANT_ENV=production opsant-web sh -c "/usr/local/bin/python3 /opt/opsant-backend/manage.py migrate" 125 | docker exec -e OPS_ANT_ENV=production opsant-web sh -c "/usr/local/bin/python3 /opt/opsant-backend/manage.py create_user --password $ADMIN_PASSWD" 126 | echo "======OpsAnt容器化部署完毕======" 127 | echo "访问地址https://${DOMAIN_NAME}" 128 | echo "初始化用户名:admin 密码:$ADMIN_PASSWD 请及时修改密码。默认保存在/tmp/opsant.password" 129 | echo "$ADMIN_PASSWD" > /tmp/opsant.password 130 | } 131 | 132 | main(){ 133 | install_check 134 | ssl_make 135 | opsant_init 136 | opsant_install 137 | mysql_init 138 | opsant_config 139 | opsant_start 140 | opsant_set 141 | } 142 | 143 | main 144 | --------------------------------------------------------------------------------