├── .gitignore ├── LICENSE ├── README.md ├── local ├── rabbitmq │ ├── README.md │ ├── docker-compose.yml │ ├── start-rabbitmq.sh │ └── stop-rabbitmq.sh ├── single-host-multi-node-elk │ ├── README.md │ ├── docker-compose.yml │ ├── elasticsearch │ │ ├── Dockerfile │ │ └── elasticsearch.yml │ ├── kibana │ │ ├── Dockerfile │ │ └── kibana.yml │ ├── logstash │ │ ├── Dockerfile │ │ ├── config │ │ │ └── logstash.yml │ │ └── pipeline │ │ │ └── logstash.conf │ ├── redis │ │ └── Dockerfile │ ├── start.sh │ └── stop.sh ├── single-host-single-node-elk │ ├── README.md │ ├── docker-compose.yml │ ├── elasticsearch │ │ ├── Dockerfile │ │ └── elasticsearch.yml │ ├── kibana │ │ ├── Dockerfile │ │ └── kibana.yml │ ├── logstash │ │ ├── Dockerfile │ │ ├── config │ │ │ └── logstash.yml │ │ └── pipeline │ │ │ └── logstash.conf │ ├── redis │ │ └── Dockerfile │ ├── start.sh │ └── stop.sh └── zipkin │ ├── README.md │ └── start-zipkin.sh └── remote ├── elk ├── README.md ├── deploy.sh ├── elk-playbook.yml └── vagrant │ ├── README.md │ ├── Vagrantfile │ ├── destroy.sh │ └── start.sh ├── inventory.ini ├── provision-playbook.yml ├── provision.sh └── roles ├── elasticsearch ├── README.md └── tasks │ └── main.yml ├── kibana ├── README.md └── tasks │ └── main.yml ├── logstash ├── README.md ├── defaults │ └── main.yml ├── files │ └── logstash-docker-compose-files │ │ ├── docker-compose.yml │ │ ├── logstash │ │ ├── Dockerfile │ │ ├── config │ │ │ └── logstash.yml │ │ └── pipeline │ │ │ └── logstash.conf │ │ └── redis │ │ └── Dockerfile └── tasks │ └── main.yml └── provision ├── README.md ├── defaults └── main.yml ├── tasks ├── centos.yml ├── main.yml └── ubuntu.yml └── templates └── docker.service.j2 /.gitignore: -------------------------------------------------------------------------------- 1 | .classpath 2 | .project 3 | .settings 4 | .gradle 5 | .idea 6 | .shelf 7 | *.iml 8 | *.ipr 9 | *.iws 10 | .DS_Store 11 | out/ 12 | build/** 13 | classes/ 14 | .ideaDataSources/ 15 | dataSources/ 16 | tmp.json 
17 | *.retry 18 | .vagrant 19 | *.log -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 项目简介 2 | 该项目包含Ecommerce项目的基础设施。 3 | 4 | 5 | Ecommerce项目包括: 6 | 7 | |代码库|用途|地址| 8 | | --- | --- | --- | 9 | |order-backend|Order服务|[https://github.com/e-commerce-sample/order-backend](https://github.com/e-commerce-sample/order-backend)| 10 | |product-backend|Product服务|[https://github.com/e-commerce-sample/product-backend](https://github.com/e-commerce-sample/product-backend)| 11 | |inventory-backend|Inventory服务|[https://github.com/e-commerce-sample/inventory-backend](https://github.com/e-commerce-sample/inventory-backend)| 12 | |common|共享依赖包|[https://github.com/e-commerce-sample/common](https://github.com/e-commerce-sample/common)| 13 | |devops|基础设施|[https://github.com/e-commerce-sample/devops](https://github.com/e-commerce-sample/devops)| 14 | 15 | # 技术选型 16 | Spring Boot、Gradle、MySQL、Junit 5、Rest Assured、Docker、RabbitMQ、Ansible 17 | 18 | # 目录结构 19 | - local主要包含本地开发过程所需要用到的基础设施,比如RabbitMQ和ELK等,均通过Docker在本地机器启动。 20 | - 
remote主要用于生产环境所需的基础设施,主要针对"虚拟机+Docker"的部署场景,本地使用Vagrant虚拟机。 21 | 22 | -------------------------------------------------------------------------------- /local/rabbitmq/README.md: -------------------------------------------------------------------------------- 1 | # 本地RabbitMQ服务器 2 | 3 | |功能|命令|备注| 4 | | --- | --- | --- | 5 | |启动RabbitMQ|`./start-rabbitmq.sh`|RabbitMQ访问:[http://localhost:15672/](http://localhost:15672/)| 6 | |停止RabbitMQ|`./stop-rabbitmq.sh`|将清空所有network和volume| 7 | 8 | -------------------------------------------------------------------------------- /local/rabbitmq/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | rabbitmq: 4 | restart: always 5 | container_name: ecommerce-order-rabbitmq 6 | image: rabbitmq:3-management 7 | networks: 8 | - ecommerce-order-net 9 | environment: 10 | - "RABBITMQ_DEFAULT_USER=rabbitmq-user" 11 | - "RABBITMQ_DEFAULT_PASS=rabbitmq-password" 12 | volumes: 13 | - ecommerce-order-rabbitmq-data:/var/lib/rabbitmq 14 | ports: 15 | - "15672:15672" 16 | - "5672:5672" 17 | 18 | networks: 19 | ecommerce-order-net: 20 | driver: bridge 21 | 22 | volumes: 23 | ecommerce-order-rabbitmq-data: 24 | driver: local -------------------------------------------------------------------------------- /local/rabbitmq/start-rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | docker-compose down --volumes 3 | 4 | docker-compose up -d 5 | -------------------------------------------------------------------------------- /local/rabbitmq/stop-rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker-compose down --volumes 4 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/README.md: -------------------------------------------------------------------------------- 
1 | ## 用法 2 | - 本地启动整个ELK:`./start.sh`,会启动2个ES节点(es1/es2),其中只有es1能够接收外部请求 3 | - 启动之后检查ES集群状态:`curl http://localhost:9202/_cluster/health?pretty` 4 | - 查看节点情况:`curl http://localhost:9202/_cat/nodes?v` 5 | - 启动之后可以访问Kibana:[http://localhost:5603](http://localhost:5603) 6 | - 首次启动Kibana需要创建Index(需要ES中有Index数据之后才行),Logstash默认的Index为`logstash-*`格式。 7 | - 关闭ELK: `./stop.sh`,将清空所有数据 8 | - 用2种方式打日志:通过gelf和通过redis 9 | - Redis默认密码:`changeme` 10 | 11 | 12 | ## redis日志测试 13 | - 保证[http://localhost:5603](http://localhost:5603)可以正常访问 14 | - 登录Redis: `redis-cli -h localhost -p 6381 -a changeme` 15 | - 在Redis中随便打点内容(需要json格式):`lpush redis-log '{"msg":"hello world"}'` 16 | - 在Kibana上创建名为`logstash-*`的Index 17 | - 在Kibana上切换到`discovery`页面便可以看到`hello world`了。 18 | 19 | ## 端口映射 20 | 21 | |宿主机端口|Docker容器|Docker容器端口| 22 | | --- | --- | --- | 23 | |9202|es1|9200| 24 | |9302|es1|9300| 25 | |12203|logstash|12201(udp)| 26 | |5603|kibana|5601| 27 | |6381|redis|6379| 28 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | es1: 4 | restart: always 5 | container_name: local-es-cluster-1 6 | image: local-es-cluster:latest 7 | build: 8 | context: elasticsearch 9 | dockerfile: Dockerfile 10 | networks: 11 | - local-elk-net 12 | environment: 13 | node.name: es1 14 | discovery.seed_hosts: es2 15 | cluster.initial_master_nodes: es1,es2 16 | ES_JAVA_OPTS: "-Xms1024m -Xmx1024m" 17 | TZ: "Asia/Shanghai" 18 | ports: 19 | - 9202:9200 20 | - 9302:9300 21 | ulimits: 22 | memlock: 23 | soft: -1 24 | hard: -1 25 | nofile: 26 | soft: 65536 27 | hard: 65536 28 | mem_limit: 10g 29 | volumes: 30 | - local-es-cluster-1-data:/usr/share/elasticsearch/data 31 | 32 | es2: 33 | restart: always 34 | container_name: local-es-cluster-2 35 | image: local-es-cluster:latest 36 | build: 37 | context: elasticsearch 38 | dockerfile: 
Dockerfile 39 | networks: 40 | - local-elk-net 41 | environment: 42 | node.name: es2 43 | discovery.seed_hosts: es1 44 | cluster.initial_master_nodes: es1,es2 45 | ES_JAVA_OPTS: "-Xms1024m -Xmx1024m" 46 | TZ: "Asia/Shanghai" 47 | ulimits: 48 | memlock: 49 | soft: -1 50 | hard: -1 51 | nofile: 52 | soft: 65536 53 | hard: 65536 54 | mem_limit: 10g 55 | volumes: 56 | - local-es-cluster-2-data:/usr/share/elasticsearch/data 57 | 58 | logstash: 59 | restart: always 60 | container_name: local-elk-logstash 61 | image: local-elk-logstash:latest 62 | build: 63 | context: logstash 64 | dockerfile: Dockerfile 65 | networks: 66 | - local-elk-net 67 | ports: 68 | - "12203:12201/udp" 69 | depends_on: 70 | - es1 71 | environment: 72 | LS_JAVA_OPTS: "-Xms1024m -Xmx1024m" 73 | TZ: "Asia/Shanghai" 74 | 75 | kibana: 76 | restart: always 77 | container_name: local-elk-kibana 78 | image: local-elk-kibana:latest 79 | build: 80 | context: kibana 81 | dockerfile: Dockerfile 82 | networks: 83 | - local-elk-net 84 | ports: 85 | - "5603:5601" 86 | depends_on: 87 | - es1 88 | environment: 89 | TZ: "Asia/Shanghai" 90 | 91 | redis: 92 | restart: always 93 | container_name: local-elk-redis 94 | image: local-elk-redis:latest 95 | build: 96 | context: redis 97 | dockerfile: Dockerfile 98 | networks: 99 | - local-elk-net 100 | ports: 101 | - "6381:6379" 102 | environment: 103 | TZ: "Asia/Shanghai" 104 | volumes: 105 | - redis-data:/data 106 | 107 | networks: 108 | local-elk-net: 109 | driver: bridge 110 | 111 | volumes: 112 | local-es-cluster-1-data: 113 | driver: local 114 | local-es-cluster-2-data: 115 | driver: local 116 | redis-data: 117 | driver: local 118 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/elasticsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/elasticsearch/elasticsearch:7.3.0 2 | 3 | COPY --chown=elasticsearch:elasticsearch 
elasticsearch.yml /usr/share/elasticsearch/config/ 4 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/elasticsearch/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cluster.name: "local-es-cluster" 3 | network.host: 0.0.0.0 4 | bootstrap.memory_lock: true -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/kibana/kibana:7.3.0 2 | 3 | COPY kibana.yml /usr/share/kibana/config/kibana.yml 4 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/kibana/kibana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | server.name: kibana 3 | server.host: "0" 4 | elasticsearch.hosts: ["http://es1:9200","http://es2:9200"] 5 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/logstash/logstash:7.3.0 2 | 3 | RUN rm -f /usr/share/logstash/pipeline/logstash.conf 4 | ADD pipeline/ /usr/share/logstash/pipeline/ 5 | ADD config/ /usr/share/logstash/config/ -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http.host: "0.0.0.0" 3 | path.config: /usr/share/logstash/pipeline 4 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/logstash/pipeline/logstash.conf: 
-------------------------------------------------------------------------------- 1 | input { 2 | redis { 3 | host => "redis" 4 | data_type => "list" 5 | key => "redis-log" 6 | password => "changeme" 7 | codec => "json" 8 | } 9 | 10 | gelf { 11 | type => "gelf" 12 | } 13 | } 14 | 15 | 16 | filter { 17 | if [type] == "redis" { 18 | mutate { 19 | add_field => { "app" => "%{tags[0]}" } 20 | } 21 | } 22 | 23 | if [type] == "gelf" { 24 | mutate { 25 | add_field => { "app" => "%{container_name}" } 26 | } 27 | } 28 | } 29 | 30 | output { 31 | elasticsearch { hosts => ["es1:9200","es2:9200"] } 32 | } 33 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis 2 | 3 | ENV REDIS_PASSWORD changeme 4 | 5 | CMD ["sh", "-c", "exec redis-server --requirepass \"$REDIS_PASSWORD\""] 6 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | docker-compose down --volumes 3 | 4 | docker-compose up -d --build 5 | -------------------------------------------------------------------------------- /local/single-host-multi-node-elk/stop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker-compose down --volumes 4 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/README.md: -------------------------------------------------------------------------------- 1 | ## 用法 2 | - 本地启动整个ELK:`./start.sh` 3 | - 启动之后检查ES集群状态:`curl http://localhost:9201/_cluster/health?pretty` 4 | - 查看节点情况:`curl http://localhost:9201/_cat/nodes?v` 5 | - 启动之后可以访问Kibana:[http://localhost:5602](http://localhost:5602) 6 | - 
首次启动Kibana需要创建Index(需要ES中有Index数据之后才行),Logstash默认的Index为`logstash-*`格式。 7 | - 关闭ELK: `./stop.sh`,将清空所有数据 8 | - 用2种方式打日志:通过gelf和通过redis 9 | - Redis默认密码:`changeme` 10 | 11 | 12 | ## redis日志测试 13 | - 保证[http://localhost:5602](http://localhost:5602)可以正常访问 14 | - 登录Redis: `redis-cli -h localhost -p 6380 -a changeme` 15 | - 在Redis中随便打点内容(需要json格式):`lpush redis-log '{"msg":"hello world"}'` 16 | - 在Kibana上创建名为`logstash-*`的Index 17 | - 在Kibana上切换到`discovery`页面便可以看到`hello world`了。 18 | 19 | ## 端口映射 20 | 21 | |宿主机端口|Docker容器|Docker容器端口| 22 | | --- | --- | --- | 23 | |9201|elasticsearch|9200| 24 | |9301|elasticsearch|9300| 25 | |12202|logstash|12201(udp)| 26 | |5602|kibana|5601| 27 | |6380|redis|6379| 28 | 29 | ## 启用Elasticsearch认证 30 | - 在`elasticsearch.yml`中配置: 31 | 32 | ``` 33 | xpack.security.enabled: true 34 | ``` 35 | 36 | - 启动ELK:`./start.sh` 37 | - 登录到elasticsearch容器中:`docker exec -it single-node-elk-es bash` 38 | - `cd`到 `/usr/share/elasticsearch/bin`目录 39 | - 设置密码:`./elasticsearch-setup-passwords auto`(自动生成)或者`./elasticsearch-setup-passwords interactive`(手动生成) 40 | - 通过Basic Authentication访问API: 41 | 42 | ``` 43 | curl http://elastic:password-for-es@localhost:9201/_cluster/health?pretty 44 | ``` 45 | - 在`kibana.yml`中配置Kibana的访问用户: 46 | 47 | ``` 48 | elasticsearch.username: "kibana" 49 | elasticsearch.password: "password-for `kibana` user" 50 | ``` 51 | 52 | - 访问Kibana:[http://localhost:5602](http://localhost:5602) 53 | - 用设置的账户登录 -------------------------------------------------------------------------------- /local/single-host-single-node-elk/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | elasticsearch: 4 | restart: always 5 | container_name: single-node-elk-es 6 | image: single-node-elk-es:latest 7 | build: 8 | context: elasticsearch 9 | dockerfile: Dockerfile 10 | networks: 11 | - single-node-elk-net 12 | environment: 13 | ES_JAVA_OPTS: "-Xms1024m -Xmx1024m" 14 | TZ: 
"Asia/Shanghai" 15 | # ELASTIC_PASSWORD: "my_own_password" # used with xpack.security.enabled 16 | ports: 17 | - 9201:9200 18 | - 9301:9300 19 | ulimits: 20 | memlock: 21 | soft: -1 22 | hard: -1 23 | nofile: 24 | soft: 65536 25 | hard: 65536 26 | mem_limit: 10g 27 | volumes: 28 | - single-node-elk-data:/usr/share/elasticsearch/data 29 | 30 | logstash: 31 | restart: always 32 | container_name: single-node-elk-logstash 33 | image: single-node-elk-logstash:latest 34 | build: 35 | context: logstash 36 | dockerfile: Dockerfile 37 | networks: 38 | - single-node-elk-net 39 | ports: 40 | - "12202:12201/udp" 41 | depends_on: 42 | - elasticsearch 43 | environment: 44 | LS_JAVA_OPTS: "-Xms1024m -Xmx1024m" 45 | TZ: "Asia/Shanghai" 46 | 47 | kibana: 48 | restart: always 49 | container_name: single-node-elk-kibana 50 | image: single-node-elk-kibana:latest 51 | build: 52 | context: kibana 53 | dockerfile: Dockerfile 54 | networks: 55 | - single-node-elk-net 56 | ports: 57 | - "5602:5601" 58 | depends_on: 59 | - elasticsearch 60 | environment: 61 | TZ: "Asia/Shanghai" 62 | 63 | redis: 64 | restart: always 65 | container_name: single-node-elk-redis 66 | image: single-node-elk-redis:latest 67 | build: 68 | context: redis 69 | dockerfile: Dockerfile 70 | networks: 71 | - single-node-elk-net 72 | ports: 73 | - "6380:6379" 74 | environment: 75 | TZ: "Asia/Shanghai" 76 | volumes: 77 | - redis-data:/data 78 | 79 | networks: 80 | single-node-elk-net: 81 | driver: bridge 82 | 83 | volumes: 84 | single-node-elk-data: 85 | driver: local 86 | redis-data: 87 | driver: local -------------------------------------------------------------------------------- /local/single-host-single-node-elk/elasticsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/elasticsearch/elasticsearch:7.3.0 2 | 3 | COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/ 4 | 
-------------------------------------------------------------------------------- /local/single-host-single-node-elk/elasticsearch/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cluster.name: "single-node-es-cluster" 3 | network.host: 0.0.0.0 4 | discovery.zen.minimum_master_nodes: 1 5 | discovery.type: single-node 6 | 7 | #xpack.security.enabled: true # used with ELASTIC_PASSWORD 8 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/kibana/kibana:7.3.0 2 | 3 | COPY kibana.yml /usr/share/kibana/config/kibana.yml 4 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/kibana/kibana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | server.name: kibana 3 | server.host: "0" 4 | elasticsearch.hosts: ["http://elasticsearch:9200"] 5 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/logstash/logstash:7.3.0 2 | 3 | RUN rm -f /usr/share/logstash/pipeline/logstash.conf 4 | ADD pipeline/ /usr/share/logstash/pipeline/ 5 | ADD config/ /usr/share/logstash/config/ -------------------------------------------------------------------------------- /local/single-host-single-node-elk/logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http.host: "0.0.0.0" 3 | path.config: /usr/share/logstash/pipeline 4 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/logstash/pipeline/logstash.conf: 
-------------------------------------------------------------------------------- 1 | input { 2 | redis { 3 | host => "redis" 4 | data_type => "list" 5 | key => "redis-log" 6 | password => "changeme" 7 | codec => "json" 8 | } 9 | 10 | gelf { 11 | type => "gelf" 12 | } 13 | } 14 | 15 | 16 | filter { 17 | if [type] == "redis" { 18 | mutate { 19 | add_field => { "app" => "%{tags[0]}" } 20 | } 21 | } 22 | 23 | if [type] == "gelf" { 24 | mutate { 25 | add_field => { "app" => "%{container_name}" } 26 | } 27 | } 28 | } 29 | 30 | output { 31 | elasticsearch { hosts => ["elasticsearch:9200"] } 32 | } 33 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis 2 | 3 | ENV REDIS_PASSWORD changeme 4 | 5 | CMD ["sh", "-c", "exec redis-server --requirepass \"$REDIS_PASSWORD\""] 6 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | docker-compose down --volumes 3 | 4 | docker-compose up -d --build 5 | -------------------------------------------------------------------------------- /local/single-host-single-node-elk/stop.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker-compose down --volumes 4 | -------------------------------------------------------------------------------- /local/zipkin/README.md: -------------------------------------------------------------------------------- 1 | # 本地Zipkin服务器 2 | 3 | |功能|命令|备注| 4 | | --- | --- | --- | 5 | |启动Zipkin|`./start-zipkin.sh`|Zipkin访问:[http://localhost:9411/](http://localhost:9411/)| 6 | 7 | -------------------------------------------------------------------------------- /local/zipkin/start-zipkin.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker rm -f zipkin 4 | docker run -d -p 9411:9411 --name zipkin openzipkin/zipkin -------------------------------------------------------------------------------- /remote/elk/README.md: -------------------------------------------------------------------------------- 1 | ## Installation step 2 | - First create 5 VMs using Vagrant,`cd` to `vagrant` folder and run: `./start.sh`, every VM will get a DNS name 3 | - Make sure the 5 VMs are listed in `inventory.ini` 4 | - Provision all nodes: `cd` to project root, `./provision.sh elk-all` 5 | - Deploy the whole ELK stack: `cd` to `elk`, run `./deploy.sh all` 6 | 7 | ## redis日志测试 8 | - 保证[http://kibana.vagrant.local:5601](http://kibana.vagrant.local:5601)可以正常访问 9 | - 登录Redis: `redis-cli -h logstash.vagrant.local -p 6379 -a changeme` 10 | - 在Redis中随便打点内容(需要json格式):`lpush redis-log '{"msg":"hello world"}'` 11 | - 在Kibana上创建名为`logstash-*`的Index 12 | - 在Kibana上切换到`discovery`页面便可以看到`hello world`了。 13 | 14 | 15 | ## Deploy commands: 16 | 17 | |Command|Usage| 18 | | --- | --- | 19 | |`deploy.sh all`|Deploy the whole ELK stack| 20 | |`deploy.sh es`|Deploy all ES nodes| 21 | |`deploy.sh es1`|Deploy es1| 22 | |`deploy.sh es2`|Deploy es2| 23 | |`deploy.sh es3`|Deploy es3| 24 | |`deploy.sh kibana`|Deploy Kibana| 25 | |`deploy.sh logstash`|Deploy Logstash| 26 | -------------------------------------------------------------------------------- /remote/elk/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export ANSIBLE_HOST_KEY_CHECKING=false 4 | 5 | dir=$(pwd) 6 | projectDir=(${dir//devops/devops }) 7 | roleDir="$projectDir/remote/roles" 8 | inventory="$projectDir/remote/inventory.ini" 9 | 10 | if [[ -z "$1" ]] 11 | then 12 | echo "Error: no tag specified, please specify a tag that occurs in the playbook. 
Special tag [all] means deploy the whole ELK stack." 13 | exit 1 14 | fi 15 | 16 | 17 | if [[ "$1" = "all" ]]; 18 | then 19 | echo "Deploy the whole ELK stack as no tag sp." 20 | echo "Use [./deploy.sh tag] to deploy tagged plays, e.g. ./deploy.sh es" 21 | ansible-playbook -i ${inventory} -v elk-playbook.yml --extra-vars "roleDir=$roleDir" 22 | else 23 | echo "Deploy with specified tag: [$1]" 24 | ansible-playbook -i ${inventory} -v elk-playbook.yml --extra-vars "roleDir=$roleDir" --tags $1 25 | fi 26 | 27 | -------------------------------------------------------------------------------- /remote/elk/elk-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy Elasticsearch node1 3 | hosts: es1 4 | become: yes 5 | roles: 6 | - "{{ roleDir }}/elasticsearch" 7 | tags: 8 | - es 9 | - es1 10 | 11 | - name: Deploy Elasticsearch node2 12 | hosts: es2 13 | become: yes 14 | roles: 15 | - "{{ roleDir }}/elasticsearch" 16 | tags: 17 | - es 18 | - es2 19 | 20 | - name: Deploy Elasticsearch node3 21 | hosts: es3 22 | become: yes 23 | roles: 24 | - "{{ roleDir }}/elasticsearch" 25 | tags: 26 | - es 27 | - es3 28 | 29 | 30 | - name: Deploy Kibana 31 | hosts: kibana 32 | become: yes 33 | roles: 34 | - "{{ roleDir }}/kibana" 35 | tags: 36 | - kibana 37 | 38 | - name: Deploy Logstash 39 | hosts: logstash 40 | become: yes 41 | roles: 42 | - "{{ roleDir }}/logstash" 43 | tags: 44 | - logstash 45 | -------------------------------------------------------------------------------- /remote/elk/vagrant/README.md: -------------------------------------------------------------------------------- 1 | ### Introduction 2 | - 5 nodes ubuntu1604 cluster with DHCP private network and DNS enabled 3 | - network: private(host and vm can access each other) 4 | - memory: 2048 5 | - cpu: 2 6 | - First step: install `landrush` vagrant plugin 7 | 8 | ``` bash 9 | vagrant plugin install landrush 10 | ``` 11 | - Remember to run `vagrant landrush 
stop` when you switch wifi 12 | - start: `./start.sh` 13 | - destroy: `./destroy.sh` 14 | - Login: `vagrant ssh nodeName` 15 | - your own public key uploaded to the vm to enable generic SSH 16 | - DNS name:`[xxx].vagrant.local` 17 | 18 | ## Node usage 19 | 20 | |Domain Name|Usage| 21 | | --- | --- | 22 | |es1.vagrant.local|ES node 1| 23 | |es2.vagrant.local|ES node 2| 24 | |es3.vagrant.local|ES node 3| 25 | |kibana.vagrant.local|Kibana| 26 | |logstash.vagrant.local|Logstash with Redis as input| -------------------------------------------------------------------------------- /remote/elk/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | tld="vagrant.local" 2 | 3 | Vagrant.configure("2") do |config| 4 | config.vm.box = "davenkin/ubuntu1604" 5 | config.vm.provision "file", source: "~/.ssh/id_rsa.pub", destination: "~/.ssh/my_id_rsa.pub" 6 | config.vm.provision "shell", inline: "cat ~/.ssh/my_id_rsa.pub >> ~/.ssh/authorized_keys", privileged: false 7 | 8 | config.landrush.enabled = true 9 | config.landrush.tld = tld 10 | 11 | (1..3).each do |i| 12 | config.vm.define "es#{i}" do |node| 13 | hostName="es#{i}."+tld 14 | node.vm.hostname = hostName 15 | node.vm.network "private_network", type: "dhcp" 16 | node.vm.provider "virtualbox" do |v| 17 | v.memory = 2048 18 | v.cpus = 2 19 | v.name = hostName 20 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 21 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 22 | v.customize ["modifyvm", :id, "--nictype1", "virtio"] 23 | end 24 | end 25 | end 26 | 27 | 28 | config.vm.define "kibana" do |node| 29 | hostName="kibana."+tld 30 | node.vm.hostname = hostName 31 | node.vm.network "private_network", type: "dhcp" 32 | node.vm.provider "virtualbox" do |v| 33 | v.memory = 2048 34 | v.cpus = 2 35 | v.name = hostName 36 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 37 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 38 | v.customize 
["modifyvm", :id, "--nictype1", "virtio"] 39 | end 40 | end 41 | 42 | config.vm.define "logstash" do |node| 43 | hostName="logstash."+tld 44 | node.vm.hostname = hostName 45 | node.vm.network "private_network", type: "dhcp" 46 | node.vm.provider "virtualbox" do |v| 47 | v.memory = 2048 48 | v.cpus = 2 49 | v.name = hostName 50 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 51 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 52 | v.customize ["modifyvm", :id, "--nictype1", "virtio"] 53 | end 54 | end 55 | 56 | 57 | end 58 | -------------------------------------------------------------------------------- /remote/elk/vagrant/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | vagrant destroy -f 3 | -------------------------------------------------------------------------------- /remote/elk/vagrant/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | vagrant landrush stop 4 | vagrant destroy -f 5 | 6 | vagrant up 7 | 8 | ping -c 1 es1.vagrant.local ; exit $? 
9 | -------------------------------------------------------------------------------- /remote/inventory.ini: -------------------------------------------------------------------------------- 1 | [elk-all] 2 | es1 ansible_ssh_host=es1.vagrant.local ansible_user=vagrant ansible_python_interpreter=/usr/bin/python3 3 | es2 ansible_ssh_host=es2.vagrant.local ansible_user=vagrant ansible_python_interpreter=/usr/bin/python3 4 | es3 ansible_ssh_host=es3.vagrant.local ansible_user=vagrant ansible_python_interpreter=/usr/bin/python3 5 | kibana ansible_ssh_host=kibana.vagrant.local ansible_user=vagrant ansible_python_interpreter=/usr/bin/python3 6 | logstash ansible_ssh_host=logstash.vagrant.local ansible_user=vagrant ansible_python_interpreter=/usr/bin/python3 7 | 8 | [nexus-all] 9 | nexus ansible_ssh_host=nexus.vagrant.local ansible_user=vagrant 10 | -------------------------------------------------------------------------------- /remote/provision-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ host }}" 3 | vars: 4 | docker_registry_host: nexus.vagrant.local:5000 5 | docker_registry_user: changeme 6 | docker_registry_password: changeme 7 | become: yes 8 | roles: 9 | - provision 10 | -------------------------------------------------------------------------------- /remote/provision.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ -z "$1" ] 4 | then 5 | echo "Error: No inventory host provided, the host should exists in inventory.ini file, example: ./provision.sh elk-es1" 6 | exit 1 7 | fi 8 | 9 | export ANSIBLE_HOST_KEY_CHECKING=false 10 | ansible-playbook -i inventory.ini -v provision-playbook.yml --extra-vars "host=$1" -v -------------------------------------------------------------------------------- /remote/roles/elasticsearch/README.md: -------------------------------------------------------------------------------- 1 | - 
Elasticsearch in production -------------------------------------------------------------------------------- /remote/roles/elasticsearch/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set vm.max_map_count for ES production #otherwise error in es: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144] 3 | sysctl: name={{ item.key }} value={{ item.value }} 4 | with_items: 5 | - { key: "vm.max_map_count", value: "262144" } 6 | 7 | 8 | - name: Stop and remove previous ES container 9 | docker_container: 10 | name: elk-es 11 | state: absent 12 | keep_volumes: no 13 | 14 | - name: Start the new ES container 15 | docker_container: 16 | name: elk-es 17 | image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0 18 | state: started 19 | restart_policy: always 20 | ports: 21 | - "9200:9200" 22 | - "9300:9300" 23 | env: 24 | cluster.name: remote-es-cluster 25 | network.host: 0.0.0.0 26 | bootstrap.memory_lock: "true" 27 | node.name: "{{ inventory_hostname }}" 28 | discovery.seed_hosts: "es1.vagrant.local,es2.vagrant.local,es3.vagrant.local" # seed all three master-eligible nodes, matching cluster.initial_master_nodes 29 | cluster.initial_master_nodes: "es1,es2,es3" 30 | network.publish_host: "{{ ansible_ssh_host }}" 31 | transport.publish_host: "{{ ansible_ssh_host }}" 32 | ES_JAVA_OPTS: "-Xms1024m -Xmx1024m" 33 | TZ: "Asia/Shanghai" 34 | ulimits: 35 | - nofile:65535:65535 # otherwise error in es: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535] 36 | - memlock:-1:-1 #otherwise error in es: memory locking requested for elasticsearch process but memory is not locked 37 | -------------------------------------------------------------------------------- /remote/roles/kibana/README.md: -------------------------------------------------------------------------------- 1 | - Kibana in production -------------------------------------------------------------------------------- /remote/roles/kibana/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Stop and remove previous Kibana container 3 | docker_container: 4 | name: elk-kibana 5 | state: absent 6 | keep_volumes: no 7 | 8 | - name: Start the new Kibana container 9 | docker_container: 10 | name: elk-kibana 11 | image: docker.elastic.co/kibana/kibana:7.3.0 12 | state: started 13 | restart_policy: always 14 | ports: 15 | - "5601:5601" 16 | env: 17 | SERVER_NAME: kibana 18 | ELASTICSEARCH_HOSTS: http://es1.vagrant.local:9200 19 | XPACK_MONITORING_ENABLED: false 20 | TZ: "Asia/Shanghai" 21 | -------------------------------------------------------------------------------- /remote/roles/logstash/README.md: -------------------------------------------------------------------------------- 1 | - Logstash in production -------------------------------------------------------------------------------- /remote/roles/logstash/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | force_remove_volumes: no 3 | -------------------------------------------------------------------------------- /remote/roles/logstash/files/logstash-docker-compose-files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | logstash: 4 | restart: always 5 | container_name: elk-logstash 6 | image: elk-logstash:latest 7 | build: 8 | context: logstash 9 | dockerfile: Dockerfile 10 | networks: 11 | - logstash-net 12 | ports: 13 | - "12201:12201/udp" 14 | environment: 15 | LS_JAVA_OPTS: "-Xms1024m -Xmx1024m" 16 | TZ: "Asia/Shanghai" 17 | 18 | redis: 19 | restart: always 20 | container_name: elk-redis 21 | image: elk-redis:latest 22 | build: 23 | context: redis 24 | dockerfile: Dockerfile 25 | networks: 26 | - logstash-net 27 | ports: 28 | - "6379:6379" 29 | environment: 30 | TZ: "Asia/Shanghai" 31 | volumes: 32 | - redis-data:/data 33 | networks: 34 | logstash-net: 
35 | driver: bridge 36 | 37 | volumes: 38 | redis-data: 39 | driver: local -------------------------------------------------------------------------------- /remote/roles/logstash/files/logstash-docker-compose-files/logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/logstash/logstash:7.3.0 2 | 3 | RUN rm -f /usr/share/logstash/pipeline/logstash.conf 4 | ADD pipeline/ /usr/share/logstash/pipeline/ 5 | ADD config/ /usr/share/logstash/config/ -------------------------------------------------------------------------------- /remote/roles/logstash/files/logstash-docker-compose-files/logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http.host: "0.0.0.0" 3 | path.config: /usr/share/logstash/pipeline 4 | -------------------------------------------------------------------------------- /remote/roles/logstash/files/logstash-docker-compose-files/logstash/pipeline/logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | redis { 3 | host => "redis" 4 | data_type => "list" 5 | key => "redis-log" 6 | password => "changeme" 7 | codec => "json" 8 | } 9 | 10 | gelf { 11 | type => "gelf" 12 | } 13 | } 14 | 15 | 16 | filter { 17 | if [type] == "redis" { 18 | mutate { 19 | add_field => { "app" => "%{tags[0]}" } 20 | } 21 | } 22 | 23 | if [type] == "gelf" { 24 | mutate { 25 | add_field => { "app" => "%{container_name}" } 26 | } 27 | } 28 | } 29 | 30 | output { 31 | elasticsearch { hosts => ["es1.vagrant.local:9200"] } 32 | } 33 | -------------------------------------------------------------------------------- /remote/roles/logstash/files/logstash-docker-compose-files/redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis 2 | 3 | ENV REDIS_PASSWORD changeme 4 | 5 | CMD ["sh", "-c", "exec redis-server --requirepass 
\"$REDIS_PASSWORD\""] 6 | -------------------------------------------------------------------------------- /remote/roles/logstash/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Upload logstash docker-compose files 3 | synchronize: 4 | src: logstash-docker-compose-files 5 | dest: /tmp 6 | delete: yes 7 | recursive: yes 8 | 9 | - name: Stop existing Logstash 10 | docker_service: 11 | project_src: /tmp/logstash-docker-compose-files 12 | state: absent 13 | remove_volumes: "{{ force_remove_volumes }}" 14 | 15 | - name: Start Logstash 16 | docker_service: 17 | project_src: /tmp/logstash-docker-compose-files 18 | state: present 19 | build: yes 20 | -------------------------------------------------------------------------------- /remote/roles/provision/README.md: -------------------------------------------------------------------------------- 1 | - Every Vagrant VM needs to do provision. 2 | - The provision process contains: 3 | - docker and docker-compose 4 | - Beijing Time 5 | - Disable SELinux 6 | - SSH user with in docker/wheel group -------------------------------------------------------------------------------- /remote/roles/provision/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_data_dir: /var/lib/docker 3 | -------------------------------------------------------------------------------- /remote/roles/provision/tasks/centos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add Docker repo 3 | get_url: 4 | url: https://download.docker.com/linux/centos/docker-ce.repo 5 | dest: /etc/yum.repos.d/docer-ce.repo 6 | become: yes 7 | 8 | 9 | - name: Install epel-release 10 | yum: 11 | name: epel-release 12 | state: present 13 | 14 | - name: Clean yum repo 15 | file: 16 | path: /var/cache/yum 17 | state: absent 18 | 19 | - name: clean 20 | shell: yum clean all 21 | args: 22 | 
warn: false 23 | 24 | - name: Update all packages 25 | yum: 26 | update_cache: yes 27 | name: '*' 28 | state: present 29 | 30 | - name: Remove old version of docker if exists 31 | yum: 32 | name: 33 | - docker 34 | - docker-client 35 | - docker-client-latest 36 | - docker-common 37 | - docker-latest 38 | - docker-latest-logrotate 39 | - docker-logrotate 40 | - docker-engine 41 | state: absent 42 | 43 | 44 | - name: Install docker-ce dependencies 45 | yum: 46 | name: 47 | - yum-utils 48 | - device-mapper-persistent-data 49 | - lvm2 50 | state: present 51 | 52 | - name: Install docker-ce 53 | yum: 54 | name: 55 | - docker-ce 56 | - docker-ce-cli 57 | - containerd.io 58 | state: present 59 | disable_gpg_check: yes 60 | 61 | 62 | - name: Stop docker 63 | service: 64 | name: docker 65 | state: stopped 66 | ignore_errors: yes 67 | 68 | - name: Clean docker data dir 69 | file: 70 | path: "{{ docker_data_dir }}" 71 | state: absent 72 | ignore_errors: yes 73 | 74 | - name: Make docker data dir 75 | file: 76 | path: "{{ docker_data_dir }}" 77 | state: directory 78 | mode: 0755 79 | owner: root 80 | group: root 81 | 82 | - name: Upload docker service file 83 | template: 84 | src: docker.service.j2 85 | dest: /lib/systemd/system/docker.service 86 | 87 | 88 | - name: Reload systemctl daemon and start docker 89 | systemd: 90 | name: docker 91 | state: restarted 92 | enabled: yes 93 | daemon_reload: yes 94 | 95 | - name: Install python-pip 96 | yum: 97 | name: python-pip 98 | state: present 99 | update_cache: yes 100 | 101 | - name: Try remove docker-py and docker as they results in conflicts 102 | raw: (pip uninstall -y docker-py docker ; ls) &> /dev/null 103 | 104 | - name: Install docker-compose 105 | pip: 106 | name: docker-compose 107 | state: present 108 | 109 | #==================utils================== 110 | - name: Install rsync 111 | yum: 112 | name: rsync 113 | state: present 114 | 115 | - name: Install bash-completion 116 | yum: 117 | name: bash-completion 118 | 
state: present 119 | 120 | - name: Install passlib 121 | pip: 122 | name: passlib 123 | state: present 124 | 125 | 126 | 127 | -------------------------------------------------------------------------------- /remote/roles/provision/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set timezone to China Standard Time 3 | timezone: 4 | name: Asia/Shanghai 5 | 6 | - name: Make sure we have a 'wheel' group 7 | group: 8 | name: wheel 9 | state: present 10 | 11 | - name: Make sure we have a 'docker' group 12 | group: 13 | name: docker 14 | state: present 15 | 16 | - name: Allow 'wheel' group to have passwordless sudo 17 | lineinfile: 18 | dest: /etc/sudoers 19 | state: present 20 | regexp: '^%wheel' 21 | line: '%wheel ALL=(ALL) NOPASSWD: ALL' 22 | validate: 'visudo -cf %s' 23 | 24 | - name: Make current user is in wheel group for sudo permission and sudoless docker 25 | user: 26 | name: "{{ ansible_user }}" 27 | shell: /bin/bash 28 | groups: docker,wheel 29 | createhome: yes 30 | 31 | - include_tasks: centos.yml 32 | when: ansible_distribution == "CentOS" 33 | 34 | - include_tasks: ubuntu.yml 35 | when: ansible_distribution == "Ubuntu" 36 | 37 | # 38 | #- name: Login Neuxs repository 39 | # docker_login: 40 | # registry: "{{ docker_registry_host }}" 41 | # username: "{{ docker_registry_user }}" 42 | # password: "{{ docker_registry_password }}" 43 | # when: inventory_hostname != "nexus" 44 | 45 | -------------------------------------------------------------------------------- /remote/roles/provision/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add aliyun main repository 3 | apt_repository: 4 | repo: deb [arch=amd64] http://mirrors.aliyun.com/ubuntu/ {{ ansible_distribution_release }} main 5 | 6 | - name: Add aliyun updates main repository 7 | apt_repository: 8 | repo: deb [arch=amd64] http://mirrors.aliyun.com/ubuntu/ {{ 
ansible_distribution_release }}-updates main 9 | 10 | - name: Add aliyun security main repository 11 | apt_repository: 12 | repo: deb [arch=amd64] http://mirrors.aliyun.com/ubuntu/ {{ ansible_distribution_release }}-security main 13 | 14 | - name: Add aliyun universe repository 15 | apt_repository: 16 | repo: deb [arch=amd64] http://mirrors.aliyun.com/ubuntu/ {{ ansible_distribution_release }} universe 17 | 18 | - name: Add aliyun updates universe repository 19 | apt_repository: 20 | repo: deb [arch=amd64] http://mirrors.aliyun.com/ubuntu/ {{ ansible_distribution_release }}-updates universe 21 | 22 | - name: Add aliyun security universe repository 23 | apt_repository: 24 | repo: deb [arch=amd64] http://mirrors.aliyun.com/ubuntu/ {{ ansible_distribution_release }}-security universe 25 | 26 | - name: Add docker-ce gpg key 27 | apt_key: 28 | url: http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg 29 | state: present 30 | 31 | - name: Add aliyun docker-ce repository 32 | apt_repository: 33 | repo: deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu {{ ansible_distribution_release }} stable 34 | 35 | 36 | - name: Update apt repository cache 37 | apt: 38 | update_cache: yes 39 | 40 | - name: Remove old versions of docker if exists 41 | apt: 42 | name: 43 | - docker 44 | - docker-engine 45 | - docker.io 46 | - containerd 47 | - runc 48 | state: absent 49 | 50 | - name: Install docker-ce dependencies 51 | apt: 52 | name: 53 | - apt-transport-https 54 | - ca-certificates 55 | - curl 56 | - gnupg-agent 57 | - software-properties-common 58 | - libltdl7 59 | state: present 60 | 61 | - name: Install docker-ce 62 | apt: 63 | name: 64 | - docker-ce 65 | - docker-ce-cli 66 | - containerd.io 67 | state: present 68 | 69 | - name: Stop docker 70 | service: 71 | name: docker 72 | state: stopped 73 | ignore_errors: yes 74 | 75 | 76 | - name: Clean docker data dir 77 | file: 78 | path: "{{ docker_data_dir }}" 79 | state: absent 80 | ignore_errors: yes 81 | 82 | - name: 
Make docker data dir 83 | file: 84 | path: "{{ docker_data_dir }}" 85 | state: directory 86 | mode: 0755 87 | owner: root 88 | group: root 89 | 90 | - name: Upload docker-ce service file 91 | template: 92 | src: docker.service.j2 93 | dest: /lib/systemd/system/docker.service 94 | 95 | - name: Reload systemctl daemon and start docker 96 | systemd: 97 | name: docker 98 | state: restarted 99 | enabled: yes 100 | daemon_reload: yes 101 | 102 | - name: Uninstall python-pip 103 | apt: 104 | name: python-pip 105 | state: absent 106 | 107 | - name: Uninstall python3-pip 108 | apt: 109 | name: python3-pip 110 | state: absent 111 | 112 | - name: Download get-pip.py 113 | get_url: url=https://bootstrap.pypa.io/get-pip.py dest=/tmp 114 | 115 | - name: Install pip for python 3 116 | command: "python3 /tmp/get-pip.py" 117 | 118 | - name: Delete get-pip.py 119 | file: state=absent path=/tmp/get-pip.py 120 | 121 | - name: Try remove docker-py and docker as they results in conflicts 122 | raw: (pip uninstall -y docker-py docker ; ls) &> /dev/null 123 | 124 | - name: Install docker-compose 125 | pip: 126 | name: docker-compose 127 | state: present 128 | 129 | #==================utils================== 130 | - name: Install rsync 131 | apt: 132 | name: rsync 133 | state: present 134 | 135 | - name: Install bash-completion 136 | apt: 137 | name: bash-completion 138 | state: present 139 | 140 | - name: Install passlib 141 | pip: 142 | name: passlib 143 | state: present 144 | 145 | -------------------------------------------------------------------------------- /remote/roles/provision/templates/docker.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Daemon 3 | After=network.target 4 | 5 | 6 | [Service] 7 | ExecStart=/usr/bin/dockerd -p /var/run/docker.pid -g {{ docker_data_dir }} -s overlay --insecure-registry {{ docker_registry_host }} --registry-mirror=https://registry.docker-cn.com 8 | Type=simple 9 | 
Restart=always 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | --------------------------------------------------------------------------------