├── .env ├── .gitattributes ├── .github ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── question.md ├── PULL_REQUEST_TEMPLATE.md ├── SECURITY.md ├── auto-release.yml └── workflows │ ├── auto-release.yml │ └── build.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── apm-server ├── Dockerfile └── config │ └── apm-server.yml ├── docker-compose.logs.yml ├── docker-compose.monitor.yml ├── docker-compose.nodes.yml ├── docker-compose.setup.yml ├── docker-compose.yml ├── elasticsearch ├── Dockerfile ├── config │ ├── elasticsearch.yml │ └── log4j2.properties └── scripts │ └── docker-healthcheck ├── filebeat ├── filebeat.docker.logs.yml └── filebeat.monitoring.yml ├── kibana ├── Dockerfile └── config │ └── kibana.yml ├── logstash ├── Dockerfile ├── config │ ├── logstash.yml │ └── pipelines.yml └── pipeline │ └── main.conf ├── secrets ├── certs │ └── .gitkeep └── keystore │ └── .gitkeep └── setup ├── instances.yml ├── keystore.sh ├── setup-certs.sh ├── setup-keystore.sh └── upgrade-keystore.sh /.env: -------------------------------------------------------------------------------- 1 | COMPOSE_PROJECT_NAME=elastic 2 | ELK_VERSION=8.10.2 3 | 4 | #----------- Images to use ----------------------# 5 | 6 | # the following images will be appended with the ELK_VERSION version number 7 | ELASTICSEARCH_IMAGE_NAME=elastdocker/elasticsearch 8 | LOGSTASH_IMAGE_NAME=elastdocker/logstash 9 | KIBANA_IMAGE_NAME=elastdocker/kibana 10 | APM_SERVER_IMAGE_NAME=elastdocker/apm-server 11 | FILEBEAT_IMAGE_NAME=docker.elastic.co/beats/filebeat 12 | 13 | # the following images will be used as is 14 | ELASTICSEARCH_EXPORTER_IMAGE=justwatch/elasticsearch_exporter:1.1.0 15 | LOGSTASH_EXPORTER_IMAGE=alxrem/prometheus-logstash-exporter 16 | 17 | #----------- Resources --------------------------# 18 | ELASTICSEARCH_HEAP=1024m 19 | LOGSTASH_HEAP=512m 20 | 21 | #----------- Hosts and Ports --------------------# 22 | # 
To be able to further "de-compose" the compose files, get hostnames from environment variables instead. 23 | 24 | ELASTICSEARCH_HOST=elasticsearch 25 | ELASTICSEARCH_PORT=9200 26 | 27 | KIBANA_HOST=kibana 28 | KIBANA_PORT=5601 29 | 30 | LOGSTASH_HOST=logstash 31 | 32 | APMSERVER_HOST=apm-server 33 | APMSERVER_PORT=8200 34 | 35 | #----------- Credentials ------------------------# 36 | # Username & Password for Admin Elasticsearch cluster. 37 | # This is used to set the password at setup, and used by others to connect to Elasticsearch at runtime. 38 | # USERNAME cannot be changed! It is set here for parameterization only. 39 | ELASTIC_USERNAME=elastic 40 | ELASTIC_PASSWORD=changeme 41 | AWS_ACCESS_KEY_ID=nottherealid 42 | AWS_SECRET_ACCESS_KEY=notherealsecret 43 | ELASTIC_APM_SECRET_TOKEN=secrettokengoeshere 44 | 45 | #----------- Cluster ----------------------------# 46 | ELASTIC_CLUSTER_NAME=elastdocker-cluster 47 | ELASTIC_INIT_MASTER_NODE=elastdocker-node-0 48 | ELASTIC_NODE_NAME=elastdocker-node-0 49 | 50 | # Hostnames of master-eligible elasticsearch instances. (matches compose generated host name) 51 | ELASTIC_DISCOVERY_SEEDS=elasticsearch 52 | 53 | #----------- For Multinode Cluster --------------# 54 | # Other nodes 55 | ELASTIC_NODE_NAME_1=elastdocker-node-1 56 | ELASTIC_NODE_NAME_2=elastdocker-node-2 57 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Declare files that will always have LF line endings on checkout. 
2 | docker-healthcheck text eol=lf 3 | *.sh text eol=lf 4 | setup/*.sh linguist-language=Dockerfile 5 | Makefile linguist-vendored 6 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response 
to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at sherifabdlnaby@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | # Contributor Covenant Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | In the interest of fostering an open and welcoming environment, we as 7 | contributors and maintainers pledge to make participation in our project and 8 | our community a harassment-free experience for everyone, regardless of age, body 9 | size, disability, ethnicity, sex characteristics, gender identity and expression, 10 | level of experience, education, socio-economic status, nationality, personal 11 | appearance, race, religion, or sexual identity and orientation. 
12 | 13 | ## Our Standards 14 | 15 | Examples of behavior that contributes to creating a positive environment 16 | include: 17 | 18 | * Using welcoming and inclusive language 19 | * Being respectful of differing viewpoints and experiences 20 | * Gracefully accepting constructive criticism 21 | * Focusing on what is best for the community 22 | * Showing empathy towards other community members 23 | 24 | Examples of unacceptable behavior by participants include: 25 | 26 | * The use of sexualized language or imagery and unwelcome sexual attention or 27 | advances 28 | * Trolling, insulting/derogatory comments, and personal or political attacks 29 | * Public or private harassment 30 | * Publishing others' private information, such as a physical or electronic 31 | address, without explicit permission 32 | * Other conduct which could reasonably be considered inappropriate in a 33 | professional setting 34 | 35 | ## Our Responsibilities 36 | 37 | Project maintainers are responsible for clarifying the standards of acceptable 38 | behavior and are expected to take appropriate and fair corrective action in 39 | response to any instances of unacceptable behavior. 40 | 41 | Project maintainers have the right and responsibility to remove, edit, or 42 | reject comments, commits, code, wiki edits, issues, and other contributions 43 | that are not aligned to this Code of Conduct, or to ban temporarily or 44 | permanently any contributor for other behaviors that they deem inappropriate, 45 | threatening, offensive, or harmful. 46 | 47 | ## Scope 48 | 49 | This Code of Conduct applies within all project spaces, and it also applies when 50 | an individual is representing the project or its community in public spaces. 51 | Examples of representing a project or community include using an official 52 | project e-mail address, posting via an official social media account, or acting 53 | as an appointed representative at an online or offline event. 
Representation of 54 | a project may be further defined and clarified by project maintainers. 55 | 56 | ## Enforcement 57 | 58 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 59 | reported by contacting the project team at [INSERT EMAIL ADDRESS]. All 60 | complaints will be reviewed and investigated and will result in a response that 61 | is deemed necessary and appropriate to the circumstances. The project team is 62 | obligated to maintain confidentiality with regard to the reporter of an incident. 63 | Further details of specific enforcement policies may be posted separately. 64 | 65 | Project maintainers who do not follow or enforce the Code of Conduct in good 66 | faith may face temporary or permanent repercussions as determined by other 67 | members of the project's leadership. 68 | 69 | ## Attribution 70 | 71 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 72 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 73 | 74 | [homepage]: https://www.contributor-covenant.org 75 | 76 | For answers to common questions about this code of conduct, see 77 | https://www.contributor-covenant.org/faq 78 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: sherifabdlnaby 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single 
Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: 'bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: 'feature request' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 
15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Ask a Question 4 | title: '' 5 | labels: 'question' 6 | assignees: '' 7 | 8 | --- 9 | 10 | Ask a question... 11 | 12 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | (Thanks for sending a pull request! Please make sure you click the link above to view the contribution guidelines, then fill out the blanks below.) 2 | 3 | What does this implement/fix? Explain your changes. 4 | --------------------------------------------------- 5 | … 6 | 7 | Does this close any currently open issues? 8 | ------------------------------------------ 9 | … 10 | 11 | 12 | Any relevant logs, error output, etc? 13 | ------------------------------------- 14 | (If it’s long, please paste to https://ghostbin.com/ and insert the link here.) 15 | 16 | Any other comments? 17 | ------------------- 18 | … 19 | 20 | Where has this been tested? 21 | --------------------------- 22 | … 23 | -------------------------------------------------------------------------------- /.github/SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Use this section to tell people about which versions of your project are 6 | currently being supported with security updates. 
7 | 8 | | Version | Supported | 9 | | ------- | ------------------ | 10 | | 5.1.x | :white_check_mark: | 11 | | 5.0.x | :x: | 12 | | 4.0.x | :white_check_mark: | 13 | | < 4.0 | :x: | 14 | 15 | ## Reporting a Vulnerability 16 | 17 | Use this section to tell people how to report a vulnerability. 18 | 19 | Tell them where to go, how often they can expect to get an update on a 20 | reported vulnerability, what to expect if the vulnerability is accepted or 21 | declined, etc. 22 | -------------------------------------------------------------------------------- /.github/auto-release.yml: -------------------------------------------------------------------------------- 1 | name-template: 'v$RESOLVED_VERSION 🚀' 2 | tag-template: 'v$RESOLVED_VERSION' 3 | version-template: '$MAJOR.$MINOR.$PATCH' 4 | version-resolver: 5 | major: 6 | labels: 7 | - 'major' 8 | minor: 9 | labels: 10 | - 'minor' 11 | - 'enhancement' 12 | - 'feature' 13 | - 'dependency-update' 14 | patch: 15 | labels: 16 | - 'auto-update' 17 | - 'patch' 18 | - 'fix' 19 | - 'chore' 20 | - 'bugfix' 21 | - 'bug' 22 | - 'hotfix' 23 | default: 'patch' 24 | 25 | categories: 26 | - title: '🚀 Enhancements' 27 | labels: 28 | - 'enhancement' 29 | - 'feature' 30 | - 'patch' 31 | - title: '⬆️ Upgrades' 32 | labels: 33 | - 'upgrades' 34 | - title: '🐛 Bug Fixes' 35 | labels: 36 | - 'fix' 37 | - 'bugfix' 38 | - 'bug' 39 | - 'hotfix' 40 | - title: '🤖 Automatic Updates' 41 | labels: 42 | - 'auto-update' 43 | - title: '📝 Documentation' 44 | labels: 45 | - 'docs' 46 | 47 | autolabeler: 48 | - label: 'docs' 49 | files: 50 | - '*.md' 51 | - label: 'enhancement' 52 | title: '/enhancement|fixes/i' 53 | 54 | - label: 'upgrades' 55 | title: '/⬆️/i' 56 | 57 | - label: 'bugfix' 58 | title: '/bugfix/i' 59 | 60 | - label: 'bug' 61 | title: '/🐛|🐞|bug/i' 62 | 63 | - label: 'auto-update' 64 | title: '/🤖/i' 65 | 66 | - label: 'feature' 67 | title: '/🚀|🎉/i' 68 | 69 | change-template: | 70 |
71 | $TITLE @$AUTHOR (#$NUMBER) 72 | 73 | $BODY 74 |
75 | 76 | template: | 77 | ## Changes 78 | 79 | $CHANGES 80 | -------------------------------------------------------------------------------- /.github/workflows/auto-release.yml: -------------------------------------------------------------------------------- 1 | name: auto-release 2 | 3 | on: 4 | push: 5 | # branches to consider in the event; optional, defaults to all 6 | branches: 7 | - main 8 | # pull_request event is required only for autolabeler 9 | pull_request: 10 | # Only following types are handled by the action, but one can default to all as well 11 | types: [ opened, reopened, synchronize ] 12 | 13 | jobs: 14 | publish: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | # Drafts your next Release notes as Pull Requests are merged into "main" 19 | - uses: release-drafter/release-drafter@v5 20 | with: 21 | publish: false 22 | prerelease: true 23 | config-name: auto-release.yml 24 | # allows autolabeler to run without unmerged PRs from being added to draft 25 | disable-releaser: ${{ github.ref_name != 'main' }} 26 | env: 27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 28 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | 6 | # A sample workflow which checks out the code, builds a container 7 | # image using Docker and scans that image for vulnerabilities using 8 | # Snyk. 
The results are then uploaded to GitHub Security Code Scanning 9 | # 10 | # For more examples, including how to limit scans to only high-severity 11 | # issues, monitor images for newly disclosed vulnerabilities in Snyk and 12 | # fail PR checks for new vulnerabilities, see https://github.com/snyk/actions/ 13 | 14 | name: Build 15 | on: 16 | push: 17 | branches: [ main ] 18 | pull_request: 19 | # The branches below must be a subset of the branches above 20 | branches: [ main ] 21 | 22 | jobs: 23 | Run: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v2 27 | - name: Build & Deploy 28 | run: make setup && make up 29 | - name: Test Elasticsearch 30 | run: timeout 240s sh -c "until curl https://elastic:changeme@localhost:9200 --insecure --silent; do echo 'Elasticsearch Not Up, Retrying...'; sleep 3; done" && echo 'Elasticsearch is up' 31 | - name: Test Kibana 32 | run: timeout 240s sh -c "until curl https://localhost:5601 --insecure --silent -I; do echo 'Kibana Not Ready, Retrying...'; sleep 3; done" && echo 'Kibana is up' 33 | 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .DS_Store 3 | /secrets 4 | tools/elastalert/rules/* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Sherif Abdel-Naby 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following 
conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL:=help 2 | 3 | include .env 4 | 5 | COMPOSE_ALL_FILES := -f docker-compose.yml -f docker-compose.monitor.yml -f docker-compose.nodes.yml -f docker-compose.logs.yml 6 | COMPOSE_MONITORING := -f docker-compose.yml -f docker-compose.monitor.yml 7 | COMPOSE_LOGGING := -f docker-compose.yml -f docker-compose.logs.yml 8 | COMPOSE_NODES := -f docker-compose.yml -f docker-compose.nodes.yml 9 | ELK_SERVICES := elasticsearch logstash kibana apm-server 10 | ELK_LOG_COLLECTION := filebeat 11 | ELK_MONITORING := elasticsearch-exporter logstash-exporter filebeat-cluster-logs 12 | ELK_NODES := elasticsearch-1 elasticsearch-2 13 | ELK_MAIN_SERVICES := ${ELK_SERVICES} ${ELK_MONITORING} 14 | ELK_ALL_SERVICES := ${ELK_MAIN_SERVICES} ${ELK_NODES} ${ELK_LOG_COLLECTION} 15 | 16 | compose_v2_not_supported = $(shell command docker compose 2> /dev/null) 17 | ifeq (,$(compose_v2_not_supported)) 18 | DOCKER_COMPOSE_COMMAND = docker-compose 19 | else 20 | DOCKER_COMPOSE_COMMAND = docker compose 21 | endif 22 | 23 | # -------------------------- 24 | .PHONY: setup keystore certs all elk monitoring build down stop restart rm logs 25 | 26 | keystore: ## Setup 
Elasticsearch Keystore, by initializing passwords, and add credentials defined in `keystore.sh`. 27 | $(DOCKER_COMPOSE_COMMAND) -f docker-compose.setup.yml run --rm keystore 28 | 29 | upgrade-keystore: ## Upgrade Elasticsearch Keystore, which is necessary when upgrading to an Elasticsearch version that uses a newer Java version. 30 | @if [ -n "$$($(DOCKER_COMPOSE_COMMAND) ps -q)" ]; then \ 31 | echo "Please stop all running containers before upgrading the keystore."; \ 32 | exit 1; \ 33 | fi 34 | $(DOCKER_COMPOSE_COMMAND) -f docker-compose.setup.yml run --rm upgrade-keystore 35 | 36 | certs: ## Generate Elasticsearch SSL Certs. 37 | $(DOCKER_COMPOSE_COMMAND) -f docker-compose.setup.yml run --rm certs 38 | 39 | setup: ## Generate Elasticsearch SSL Certs and Keystore. 40 | @make certs 41 | @make keystore 42 | 43 | all: ## Start Elk and all its component (ELK, Monitoring, and Tools). 44 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_ALL_FILES} up -d --build ${ELK_MAIN_SERVICES} 45 | 46 | elk: ## Start ELK. 47 | $(DOCKER_COMPOSE_COMMAND) up -d --build 48 | 49 | up: 50 | @make elk 51 | @echo "Visit Kibana: https://localhost:5601 (user: elastic, password: changeme) [Unless you changed values in .env]" 52 | 53 | monitoring: ## Start ELK Monitoring. 54 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_MONITORING} up -d --build ${ELK_MONITORING} 55 | 56 | collect-docker-logs: ## Start Filebeat that collects all Host Docker Logs and ship it to ELK 57 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_LOGGING} up -d --build ${ELK_LOG_COLLECTION} 58 | 59 | nodes: ## Start Two Extra Elasticsearch Nodes 60 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_NODES} up -d --build ${ELK_NODES} 61 | 62 | build: ## Build ELK and all its extra components. 63 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_ALL_FILES} build ${ELK_ALL_SERVICES} 64 | ps: ## Show all running containers. 65 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_ALL_FILES} ps 66 | 67 | down: ## Down ELK and all its extra components. 
68 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_ALL_FILES} down 69 | 70 | stop: ## Stop ELK and all its extra components. 71 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_ALL_FILES} stop ${ELK_ALL_SERVICES} 72 | 73 | restart: ## Restart ELK and all its extra components. 74 | $(DOCKER_COMPOSE_COMMAND) ${COMPOSE_ALL_FILES} restart ${ELK_ALL_SERVICES} 75 | 76 | rm: ## Remove ELK and all its extra components containers. 77 | $(DOCKER_COMPOSE_COMMAND) $(COMPOSE_ALL_FILES) rm -f ${ELK_ALL_SERVICES} 78 | 79 | logs: ## Tail all logs with -n 1000. 80 | $(DOCKER_COMPOSE_COMMAND) $(COMPOSE_ALL_FILES) logs --follow --tail=1000 ${ELK_ALL_SERVICES} 81 | 82 | images: ## Show all Images of ELK and all its extra components. 83 | $(DOCKER_COMPOSE_COMMAND) $(COMPOSE_ALL_FILES) images ${ELK_ALL_SERVICES} 84 | 85 | prune: ## Remove ELK Containers and Delete ELK-related Volume Data (the elastic_elasticsearch-data volume) 86 | @make stop && make rm 87 | @docker volume prune -f --filter label=com.docker.compose.project=${COMPOSE_PROJECT_NAME} 88 | 89 | help: ## Show this help. 90 | @echo "Make Application Docker Images and Containers using Docker-Compose files in 'docker' Dir." 91 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m (default: help)\n\nTargets:\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-12s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST) 92 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 |

Elastic Stack on Docker

5 |

Preconfigured Security, Tools, and Self-Monitoring

6 |

Configured to be ready to be used for Log, Metrics, APM, Alerting, Machine Learning, and Security (SIEM) usecases.

7 |

8 | 9 | Elastic Stack Version 7^^ 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | contributions welcome 22 | 23 | 24 | GitHub forks 25 | 26 | 27 | GitHub issues 28 | 29 | 30 | GitHub license 31 | 32 |

33 | 34 | # Introduction 35 | Elastic Stack (**ELK**) Docker Composition, preconfigured with **Security**, **Monitoring**, and **Tools**; Up with a Single Command. 36 | 37 | Suitable for Demoing, MVPs and small production deployments. 38 | 39 | Stack Version: [8.10.2](https://www.elastic.co/blog/whats-new-elastic-8-10-0) 🎉 - Based on [Official Elastic Docker Images](https://www.docker.elastic.co/) 40 | > You can change Elastic Stack version by setting `ELK_VERSION` in `.env` file and rebuild your images. Any version >= 8.0.0 is compatible with this template. 41 | 42 | ### Main Features 📜 43 | 44 | - Configured as a Production Single Node Cluster. (With a multi-node cluster option for experimenting). 45 | - Security Enabled By Default. 46 | - Configured to Enable: 47 | - Logging & Metrics Ingestion 48 | - Option to collect logs of all Docker Containers running on the host. via `make collect-docker-logs`. 49 | - APM 50 | - Alerting 51 | - Machine Learning 52 | - Anomaly Detection 53 | - SIEM (Security information and event management). 54 | - Enabling Trial License 55 | - Use Docker-Compose and `.env` to configure your entire stack parameters. 56 | - Persist Elasticsearch's Keystore and SSL Certifications. 57 | - Self-Monitoring Metrics Enabled. 58 | - Prometheus Exporters for Stack Metrics. 59 | - Embedded Container Healthchecks for Stack Images. 60 | 61 | #### More points 62 | And comparing Elastdocker and the popular [deviantony/docker-elk](https://github.com/deviantony/docker-elk) 63 | 64 |
Expand... 65 |

66 | 67 | One of the most popular ELK on Docker repositories is the awesome [deviantony/docker-elk](https://github.com/deviantony/docker-elk). 68 | Elastdocker differs from `deviantony/docker-elk` in the following points. 69 | 70 | - Security enabled by default using Basic license, not Trial. 71 | 72 | - Persisting data by default in a volume. 73 | 74 | - Run in Production Mode (by enabling SSL on Transport Layer, and add initial master node settings). 75 | 76 | - Persisting Generated Keystore, and create an extendable script that makes it easier to recreate it every-time the container is created. 77 | 78 | - Parameterize credentials in .env instead of hardcoding `elastich:changeme` in every component config. 79 | 80 | - Parameterize all other Config like Heap Size. 81 | 82 | - Add recommended environment configurations as Ulimits and Swap disable to the docker-compose. 83 | 84 | - Make it ready to be extended into a multinode cluster. 85 | 86 | - Configuring the Self-Monitoring and the Filebeat agent that ship ELK logs to ELK itself. (as a step to shipping it to a monitoring cluster in the future). 87 | 88 | - Configured Prometheus Exporters. 89 | 90 | - The Makefile that simplifies everything into some simple commands. 91 | 92 |

93 |
94 | 95 | ----- 96 | 97 | # Requirements 98 | 99 | - [Docker 20.05 or higher](https://docs.docker.com/install/) 100 | - [Docker-Compose 1.29 or higher](https://docs.docker.com/compose/install/) 101 | - 4GB RAM (For Windows and MacOS make sure Docker's VM has more than 4GB+ memory.) 102 | 103 | # Setup 104 | 105 | 1. Clone the Repository 106 | ```bash 107 | git clone https://github.com/sherifabdlnaby/elastdocker.git 108 | ``` 109 | 2. Initialize Elasticsearch Keystore and TLS Self-Signed Certificates 110 | ```bash 111 | $ make setup 112 | ``` 113 | > **For Linux's docker hosts only**. By default virtual memory [is not enough](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html) so run the next command as root `sysctl -w vm.max_map_count=262144` 114 | 3. Start Elastic Stack 115 | ```bash 116 | $ make elk $ docker-compose up -d $ docker compose up -d 117 | ``` 118 | 4. Visit Kibana at [https://localhost:5601](https://localhost:5601) or `https://:5601` 119 | 120 | Default Username: `elastic`, Password: `changeme` 121 | 122 | > - Notice that Kibana is configured to use HTTPS, so you'll need to write `https://` before `localhost:5601` in the browser. 123 | > - Modify `.env` file for your needs, most importantly `ELASTIC_PASSWORD` that setup your superuser `elastic`'s password, `ELASTICSEARCH_HEAP` & `LOGSTASH_HEAP` for Elasticsearch & Logstash Heap Size. 124 | 125 | > Whatever your Host (e.g AWS EC2, Azure, DigitalOcean, or on-premise server), once you expose your host to the network, ELK component will be accessible on their respective ports. Since the enabled TLS uses a self-signed certificate, it is recommended to SSL-Terminate public traffic using your signed certificates. 126 | 127 | > 🏃🏻‍♂️ To start ingesting logs, you can start by running `make collect-docker-logs` which will collect your host's container logs. 128 | 129 | ## Additional Commands 130 | 131 |
Expand 132 |

133 | 134 | #### To Start Monitoring and Prometheus Exporters 135 | ```shell 136 | $ make monitoring 137 | ``` 138 | #### To Ship Docker Container Logs to ELK 139 | ```shell 140 | $ make collect-docker-logs 141 | ``` 142 | #### To Start **Elastic Stack, Tools and Monitoring** 143 | ``` 144 | $ make all 145 | ``` 146 | #### To Start 2 Extra Elasticsearch nodes (recommended for experimenting only) 147 | ```shell 148 | $ make nodes 149 | ``` 150 | #### To Rebuild Images 151 | ```shell 152 | $ make build 153 | ``` 154 | #### Bring down the stack. 155 | ```shell 156 | $ make down 157 | ``` 158 | 159 | #### Reset everything, Remove all containers, and delete **DATA**! 160 | ```shell 161 | $ make prune 162 | ``` 163 | 164 |

165 |
166 | 167 | # Configuration 168 | 169 | * Some Configurations are parameterized in the `.env` file. 170 | * `ELASTIC_PASSWORD`, user `elastic`'s password (default: `changeme` _pls_). 171 | * `ELK_VERSION` Elastic Stack Version (default: `8.10.2`) 172 | * `ELASTICSEARCH_HEAP`, how much memory Elasticsearch allocates (default: 1GB -good for development only-) 173 | * `LOGSTASH_HEAP`, how much memory Logstash allocates. 174 | * Other configurations such as cluster name, node name, etc. 175 | * Elasticsearch Configuration in `elasticsearch.yml` at `./elasticsearch/config`. 176 | * Logstash Configuration in `logstash.yml` at `./logstash/config/logstash.yml`. 177 | * Logstash Pipeline in `main.conf` at `./logstash/pipeline/main.conf`. 178 | * Kibana Configuration in `kibana.yml` at `./kibana/config`. 179 | 180 | ### Setting Up Keystore 181 | 182 | You can extend the Keystore generation script by adding keys to `./setup/keystore.sh` script. (e.g Add S3 Snapshot Repository Credentials) 183 | 184 | To Re-generate Keystore: 185 | ``` 186 | make keystore 187 | ``` 188 | 189 | ### Notes 190 | 191 | 192 | - ⚠️ Elasticsearch HTTP layer is using SSL, which means you need to configure your elasticsearch clients with the `CA` in `secrets/certs/ca/ca.crt`, or configure the client to ignore SSL Certificate Verification (e.g `--insecure` in `curl`). 193 | 194 | - Adding Two Extra Nodes to the cluster will make the cluster depend on them and it won't start without them again. 195 | 196 | - Makefile is a wrapper around `Docker-Compose` commands, use `make help` to know every command. 197 | 198 | - Elasticsearch will save its data to a volume named `elasticsearch-data` 199 | 200 | - Elasticsearch Keystore (that contains passwords and credentials) and SSL Certificate are generated in the `./secrets` directory by the setup command. 201 | 202 | - Make sure to run `make setup` if you changed `ELASTIC_PASSWORD` and to restart the stack afterwards. 
203 | 204 | - For Linux Users it's recommended to set the following configuration (run as `root`) 205 | ``` 206 | sysctl -w vm.max_map_count=262144 207 | ``` 208 | By default, Virtual Memory [is not enough](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html). 209 | 210 | --------------------------- 211 | 212 | ![Intro](https://user-images.githubusercontent.com/16992394/156664447-c24c49f4-4282-4d6a-81a7-10743cfa384e.png) 213 | ![Alerting](https://user-images.githubusercontent.com/16992394/156664848-d14f5e58-8f80-497d-a841-914c05a4b69c.png) 214 | ![Maps](https://user-images.githubusercontent.com/16992394/156664562-d38e11ee-b033-4b91-80bd-3a866ad65f56.png) 215 | ![ML](https://user-images.githubusercontent.com/16992394/156664695-5c1ed4a7-82f3-47a6-ab5c-b0ce41cc0fbe.png) 216 | 217 | # Working with Elastic APM 218 | 219 | After completing the setup step, you will notice a container named apm-server which gives you deeper visibility into your applications and can help you to identify and resolve root cause issues with correlated traces, logs, and metrics. 220 | 221 | ## Authenticating with Elastic APM 222 | 223 | In order to authenticate with Elastic APM, you will need the following: 224 | 225 | - The value of `ELASTIC_APM_SECRET_TOKEN` defined in `.env` file as we have [secret token](https://www.elastic.co/guide/en/apm/guide/master/secret-token.html) enabled by default 226 | - The ability to reach port `8200` 227 | - Install elastic apm client in your application e.g. 
for NodeJS based applications you need to install [elastic-apm-node](https://www.elastic.co/guide/en/apm/agent/nodejs/master/typescript.html) 228 | - Import the package in your application and call the start function, In case of NodeJS based application you can do the following: 229 | 230 | ``` 231 | const apm = require('elastic-apm-node').start({ 232 | serviceName: 'foobar', 233 | secretToken: process.env.ELASTIC_APM_SECRET_TOKEN, 234 | 235 | // https is enabled by default as per elastdocker configuration 236 | serverUrl: 'https://localhost:8200', 237 | }) 238 | ``` 239 | > Make sure that the agent is started before you require any other modules in your Node.js application - i.e. before express, http, etc. as mentioned in [Elastic APM Agent - NodeJS initialization](https://www.elastic.co/guide/en/apm/agent/nodejs/master/express.html#express-initialization) 240 | 241 | For more details or other languages you can check the following: 242 | - [APM Agents in different languages](https://www.elastic.co/guide/en/apm/agent/index.html) 243 | 244 | # Monitoring The Cluster 245 | 246 | ### Via Self-Monitoring 247 | 248 | Head to Stack Monitoring tab in Kibana to see cluster metrics for all stack components. 249 | 250 | ![Overview](https://user-images.githubusercontent.com/16992394/156664539-cc7e1a69-f1aa-4aca-93f6-7aedaabedd2c.png) 251 | ![Moniroting](https://user-images.githubusercontent.com/16992394/156664647-78cfe2af-489d-4c35-8963-9b0a46904cf7.png) 252 | 253 | > In Production, cluster metrics should be shipped to another dedicated monitoring cluster. 254 | 255 | ### Via Prometheus Exporters 256 | If you started Prometheus Exporters using `make monitoring` command. Prometheus Exporters will expose metrics at the following ports. 
257 | 258 | | **Prometheus Exporter** | **Port** | **Recommended Grafana Dashboard** | 259 | |-------------------------- |---------- |------------------------------------------------ | 260 | | `elasticsearch-exporter` | `9114` | [Elasticsearch by Kristian Jensen](https://grafana.com/grafana/dashboards/4358) | 261 | | `logstash-exporter` | `9304` | [logstash-monitoring by dpavlos](https://github.com/dpavlos/logstash-monitoring) | 262 | 263 | ![Metrics](https://user-images.githubusercontent.com/16992394/78685076-89a58900-78f1-11ea-959b-ce374fe51500.jpg) 264 | 265 | # License 266 | [MIT License](https://raw.githubusercontent.com/sherifabdlnaby/elastdocker/master/LICENSE) 267 | Copyright (c) 2022 Sherif Abdel-Naby 268 | 269 | # Contribution 270 | 271 | PR(s) are Open and Welcomed. 272 | -------------------------------------------------------------------------------- /apm-server/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/apm-server 4 | FROM docker.elastic.co/apm/apm-server:${ELK_VERSION} 5 | ARG ELK_VERSION 6 | -------------------------------------------------------------------------------- /apm-server/config/apm-server.yml: -------------------------------------------------------------------------------- 1 | ######################### APM Server Configuration ######################### 2 | 3 | ################################ APM Server ################################ 4 | 5 | apm-server: 6 | # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket. 7 | host: "0.0.0.0:${APMSERVER_PORT}" 8 | 9 | 10 | #---------------------------- APM Server - Secure Communication with Agents ---------------------------- 11 | 12 | # Enable authentication using Secret token 13 | auth: 14 | secret_token: '${ELASTIC_APM_SECRET_TOKEN}' 15 | 16 | # Enable secure communication between APM agents and the server. 
By default ssl is disabled. 17 | ssl: 18 | enabled: true 19 | 20 | # Path to file containing the certificate for server authentication. 21 | # Needs to be configured when ssl is enabled. 22 | certificate: "/certs/apm-server.crt" 23 | 24 | # Path to file containing server certificate key. 25 | # Needs to be configured when ssl is enabled. 26 | key: "/certs/apm-server.key" 27 | 28 | #================================ Outputs ================================= 29 | 30 | # Configure the output to use when sending the data collected by apm-server. 31 | 32 | #-------------------------- Elasticsearch output -------------------------- 33 | output.elasticsearch: 34 | # Array of hosts to connect to. 35 | # Scheme and port can be left out and will be set to the default (`http` and `9200`). 36 | # In case you specify and additional path, the scheme is required: `http://elasticsearch:9200/path`. 37 | # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`. 38 | hosts: '${ELASTICSEARCH_HOST_PORT}' 39 | 40 | # Boolean flag to enable or disable the output module. 41 | enabled: true 42 | 43 | # Protocol - either `http` (default) or `https`. 44 | protocol: "https" 45 | 46 | # Authentication credentials 47 | username: '${ELASTIC_USERNAME}' 48 | password: '${ELASTIC_PASSWORD}' 49 | 50 | # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication. 51 | ssl.enabled: true 52 | 53 | # List of root certificates for HTTPS server verifications. 54 | ssl.certificate_authorities: ["/certs/ca.crt"] 55 | 56 | # Certificate for SSL client authentication. 57 | ssl.certificate: "/certs/apm-server.crt" 58 | 59 | # Client Certificate Key 60 | ssl.key: "/certs/apm-server.key" 61 | 62 | #============================= X-pack Monitoring ============================= 63 | 64 | # APM server can export internal metrics to a central Elasticsearch monitoring 65 | # cluster. This requires x-pack monitoring to be enabled in Elasticsearch. 
The 66 | # reporting is disabled by default. 67 | 68 | # Set to true to enable the monitoring reporter. 69 | monitoring.enabled: true 70 | 71 | # Most settings from the Elasticsearch output are accepted here as well. 72 | # Note that these settings should be configured to point to your Elasticsearch *monitoring* cluster. 73 | # Any setting that is not set is automatically inherited from the Elasticsearch 74 | # output configuration. This means that if you have the Elasticsearch output configured, 75 | # you can simply uncomment the following line. 76 | monitoring.elasticsearch: 77 | 78 | # Protocol - either `http` (default) or `https`. 79 | protocol: "https" 80 | 81 | # Authentication credentials 82 | username: '${ELASTIC_USERNAME}' 83 | password: '${ELASTIC_PASSWORD}' 84 | 85 | # Array of hosts to connect to. 86 | # Scheme and port can be left out and will be set to the default (`http` and `9200`). 87 | # In case you specify and additional path, the scheme is required: `http://elasticsearch:9200/path`. 88 | # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`. 89 | hosts: '${ELASTICSEARCH_HOST_PORT}' 90 | 91 | # Enable custom SSL settings. Set to false to ignore custom SSL settings for secure communication. 92 | ssl.enabled: true 93 | 94 | # List of root certificates for HTTPS server verifications. 95 | ssl.certificate_authorities: ["/certs/ca.crt"] 96 | 97 | # Certificate for SSL client authentication. 98 | ssl.certificate: "/certs/apm-server.crt" 99 | 100 | # Client Certificate Key 101 | ssl.key: "/certs/apm-server.key" 102 | -------------------------------------------------------------------------------- /docker-compose.logs.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | # will contain all elasticsearch data. 
4 | volumes: 5 | filebeat-data: 6 | 7 | services: 8 | # Docker Logs Shipper ------------------------------ 9 | filebeat: 10 | image: ${FILEBEAT_IMAGE_NAME}:${ELK_VERSION} 11 | restart: always 12 | # -e flag to log to stderr and disable syslog/file output 13 | command: -e --strict.perms=false 14 | user: root 15 | environment: 16 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 17 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 18 | KIBANA_HOST_PORT: ${KIBANA_HOST}:${KIBANA_PORT} 19 | ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT} 20 | volumes: 21 | - ./filebeat/filebeat.docker.logs.yml:/usr/share/filebeat/filebeat.yml:ro 22 | - /var/lib/docker/containers:/var/lib/docker/containers:ro 23 | - /var/run/docker.sock:/var/run/docker.sock:ro 24 | - filebeat-data:/var/lib/filebeat/data -------------------------------------------------------------------------------- /docker-compose.monitor.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | services: 4 | 5 | # Prometheus Exporters ------------------------------ 6 | elasticsearch-exporter: 7 | image: ${ELASTICSEARCH_EXPORTER_IMAGE} 8 | restart: always 9 | command: ["--es.uri", "https://${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}@${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}", 10 | "--es.ssl-skip-verify", 11 | "--es.all", 12 | "--es.snapshots", 13 | "--es.indices"] 14 | ports: 15 | - "9114:9114" 16 | 17 | logstash-exporter: 18 | image: ${LOGSTASH_EXPORTER_IMAGE} 19 | restart: always 20 | ports: 21 | - "9304:9304" 22 | command: ["-logstash.host", "${LOGSTASH_HOST}"] 23 | 24 | # Cluster Logs Shipper ------------------------------ 25 | filebeat-cluster-logs: 26 | image: ${FILEBEAT_IMAGE_NAME}:${ELK_VERSION} 27 | restart: always 28 | # -e flag to log to stderr and disable syslog/file output 29 | command: -e --strict.perms=false 30 | user: root 31 | environment: 32 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 33 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 34 | 
KIBANA_HOST_PORT: ${KIBANA_HOST}:${KIBANA_PORT} 35 | ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT} 36 | volumes: 37 | - ./filebeat/filebeat.monitoring.yml:/usr/share/filebeat/filebeat.yml:ro 38 | - /var/lib/docker/containers:/var/lib/docker/containers:ro 39 | - /var/run/docker.sock:/var/run/docker.sock:ro -------------------------------------------------------------------------------- /docker-compose.nodes.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | # will contain all elasticsearch data. 4 | volumes: 5 | elasticsearch-data-1: 6 | elasticsearch-data-2: 7 | 8 | services: 9 | elasticsearch-1: 10 | image: ${ELASTICSEARCH_IMAGE_NAME}:${ELK_VERSION} 11 | build: 12 | context: elasticsearch/ 13 | args: 14 | ELK_VERSION: ${ELK_VERSION} 15 | restart: unless-stopped 16 | environment: 17 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 18 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 19 | ELASTIC_CLUSTER_NAME: ${ELASTIC_CLUSTER_NAME} 20 | ELASTIC_NODE_NAME: ${ELASTIC_NODE_NAME_1} 21 | ELASTIC_INIT_MASTER_NODE: ${ELASTIC_INIT_MASTER_NODE} 22 | ELASTIC_DISCOVERY_SEEDS: ${ELASTIC_DISCOVERY_SEEDS} 23 | ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT} 24 | ES_JAVA_OPTS: -Xmx${ELASTICSEARCH_HEAP} -Xms${ELASTICSEARCH_HEAP} -Des.enforce.bootstrap.checks=true 25 | bootstrap.memory_lock: "true" 26 | volumes: 27 | - elasticsearch-data-1:/usr/share/elasticsearch/data 28 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml 29 | - ./elasticsearch/config/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties 30 | secrets: 31 | - source: elasticsearch.keystore 32 | target: /usr/share/elasticsearch/config/elasticsearch.keystore 33 | - source: elastic.ca 34 | target: /usr/share/elasticsearch/config/certs/ca.crt 35 | - source: elasticsearch.certificate 36 | target: /usr/share/elasticsearch/config/certs/elasticsearch.crt 37 | - source: elasticsearch.key 38 | target: 
/usr/share/elasticsearch/config/certs/elasticsearch.key 39 | ulimits: 40 | memlock: 41 | soft: -1 42 | hard: -1 43 | nofile: 44 | soft: 200000 45 | hard: 200000 46 | elasticsearch-2: 47 | image: ${ELASTICSEARCH_IMAGE_NAME}:${ELK_VERSION} 48 | build: 49 | context: elasticsearch/ 50 | args: 51 | ELK_VERSION: ${ELK_VERSION} 52 | restart: unless-stopped 53 | environment: 54 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 55 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 56 | ELASTIC_CLUSTER_NAME: ${ELASTIC_CLUSTER_NAME} 57 | ELASTIC_NODE_NAME: ${ELASTIC_NODE_NAME_2} 58 | ELASTIC_INIT_MASTER_NODE: ${ELASTIC_INIT_MASTER_NODE} 59 | ELASTIC_DISCOVERY_SEEDS: ${ELASTIC_DISCOVERY_SEEDS} 60 | ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT} 61 | ES_JAVA_OPTS: -Xmx${ELASTICSEARCH_HEAP} -Xms${ELASTICSEARCH_HEAP} -Des.enforce.bootstrap.checks=true 62 | bootstrap.memory_lock: "true" 63 | volumes: 64 | - elasticsearch-data-2:/usr/share/elasticsearch/data 65 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml 66 | - ./elasticsearch/config/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties 67 | secrets: 68 | - source: elasticsearch.keystore 69 | target: /usr/share/elasticsearch/config/elasticsearch.keystore 70 | - source: elastic.ca 71 | target: /usr/share/elasticsearch/config/certs/ca.crt 72 | - source: elasticsearch.certificate 73 | target: /usr/share/elasticsearch/config/certs/elasticsearch.crt 74 | - source: elasticsearch.key 75 | target: /usr/share/elasticsearch/config/certs/elasticsearch.key 76 | ulimits: 77 | memlock: 78 | soft: -1 79 | hard: -1 80 | nofile: 81 | soft: 200000 82 | hard: 200000 -------------------------------------------------------------------------------- /docker-compose.setup.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | services: 4 | keystore: 5 | image: ${ELASTICSEARCH_IMAGE_NAME}:${ELK_VERSION} 6 | build: 7 | context: elasticsearch/ 8 | args: 9 | 
ELK_VERSION: ${ELK_VERSION} 10 | command: bash /setup/setup-keystore.sh 11 | user: "0" 12 | volumes: 13 | - ./secrets:/secrets 14 | - ./setup/:/setup/ 15 | environment: 16 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 17 | 18 | upgrade-keystore: 19 | extends: 20 | service: keystore 21 | command: bash /setup/upgrade-keystore.sh 22 | 23 | certs: 24 | image: ${ELASTICSEARCH_IMAGE_NAME}:${ELK_VERSION} 25 | build: 26 | context: elasticsearch/ 27 | args: 28 | ELK_VERSION: ${ELK_VERSION} 29 | command: bash /setup/setup-certs.sh 30 | user: "0" 31 | volumes: 32 | - ./secrets:/secrets 33 | - ./setup/:/setup -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | # To Join any other app setup using another network, change name and set external = true 4 | networks: 5 | default: 6 | name: elastic 7 | external: false 8 | 9 | # will contain all elasticsearch data. 
10 | volumes: 11 | elasticsearch-data: 12 | 13 | secrets: 14 | elasticsearch.keystore: 15 | file: ./secrets/keystore/elasticsearch.keystore 16 | elasticsearch.service_tokens: 17 | file: ./secrets/service_tokens 18 | elastic.ca: 19 | file: ./secrets/certs/ca/ca.crt 20 | elasticsearch.certificate: 21 | file: ./secrets/certs/elasticsearch/elasticsearch.crt 22 | elasticsearch.key: 23 | file: ./secrets/certs/elasticsearch/elasticsearch.key 24 | kibana.certificate: 25 | file: ./secrets/certs/kibana/kibana.crt 26 | kibana.key: 27 | file: ./secrets/certs/kibana/kibana.key 28 | apm-server.certificate: 29 | file: ./secrets/certs/apm-server/apm-server.crt 30 | apm-server.key: 31 | file: ./secrets/certs/apm-server/apm-server.key 32 | 33 | services: 34 | elasticsearch: 35 | image: ${ELASTICSEARCH_IMAGE_NAME}:${ELK_VERSION} 36 | build: 37 | context: elasticsearch/ 38 | args: 39 | ELK_VERSION: ${ELK_VERSION} 40 | restart: unless-stopped 41 | environment: 42 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 43 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 44 | ELASTIC_CLUSTER_NAME: ${ELASTIC_CLUSTER_NAME} 45 | ELASTIC_NODE_NAME: ${ELASTIC_NODE_NAME} 46 | ELASTIC_INIT_MASTER_NODE: ${ELASTIC_INIT_MASTER_NODE} 47 | ELASTIC_DISCOVERY_SEEDS: ${ELASTIC_DISCOVERY_SEEDS} 48 | ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT} 49 | ES_JAVA_OPTS: "-Xmx${ELASTICSEARCH_HEAP} -Xms${ELASTICSEARCH_HEAP} -Des.enforce.bootstrap.checks=true -Dlog4j2.formatMsgNoLookups=true" 50 | bootstrap.memory_lock: "true" 51 | volumes: 52 | - elasticsearch-data:/usr/share/elasticsearch/data 53 | - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml 54 | - ./elasticsearch/config/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties 55 | secrets: 56 | - source: elasticsearch.keystore 57 | target: /usr/share/elasticsearch/config/elasticsearch.keystore 58 | - source: elasticsearch.service_tokens 59 | target: /usr/share/elasticsearch/config/service_tokens 60 | - source: elastic.ca 61 | 
target: /usr/share/elasticsearch/config/certs/ca.crt 62 | - source: elasticsearch.certificate 63 | target: /usr/share/elasticsearch/config/certs/elasticsearch.crt 64 | - source: elasticsearch.key 65 | target: /usr/share/elasticsearch/config/certs/elasticsearch.key 66 | ports: 67 | - "${ELASTICSEARCH_PORT}:${ELASTICSEARCH_PORT}" 68 | - "9300:9300" 69 | ulimits: 70 | memlock: 71 | soft: -1 72 | hard: -1 73 | nofile: 74 | soft: 200000 75 | hard: 200000 76 | healthcheck: 77 | test: ["CMD", "sh", "-c", "curl -sf --insecure https://$ELASTIC_USERNAME:$ELASTIC_PASSWORD@localhost:$ELASTICSEARCH_PORT/_cat/health | grep -ioE 'green|yellow' || echo 'not green/yellow cluster status'"] 78 | 79 | logstash: 80 | image: ${LOGSTASH_IMAGE_NAME}:${ELK_VERSION} 81 | build: 82 | context: logstash/ 83 | args: 84 | ELK_VERSION: $ELK_VERSION 85 | restart: unless-stopped 86 | volumes: 87 | - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro 88 | - ./logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro 89 | - ./logstash/pipeline:/usr/share/logstash/pipeline:ro 90 | secrets: 91 | - source: elastic.ca 92 | target: /certs/ca.crt 93 | environment: 94 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 95 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 96 | ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT} 97 | LS_JAVA_OPTS: "-Xmx${LOGSTASH_HEAP} -Xms${LOGSTASH_HEAP} -Dlog4j2.formatMsgNoLookups=true" 98 | ports: 99 | - "5044:5044" 100 | - "9600:9600" 101 | healthcheck: 102 | test: ["CMD", "curl", "-s" ,"-XGET", "http://127.0.0.1:9600"] 103 | 104 | kibana: 105 | image: ${KIBANA_IMAGE_NAME}:${ELK_VERSION} 106 | build: 107 | context: kibana/ 108 | args: 109 | ELK_VERSION: $ELK_VERSION 110 | restart: unless-stopped 111 | volumes: 112 | - ./kibana/config/:/usr/share/kibana/config:ro 113 | environment: 114 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 115 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 116 | ELASTICSEARCH_HOST_PORT: 
https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT} 117 | KIBANA_PORT: ${KIBANA_PORT} 118 | env_file: 119 | - ./secrets/.env.kibana.token 120 | secrets: 121 | - source: elastic.ca 122 | target: /certs/ca.crt 123 | - source: kibana.certificate 124 | target: /certs/kibana.crt 125 | - source: kibana.key 126 | target: /certs/kibana.key 127 | ports: 128 | - "${KIBANA_PORT}:${KIBANA_PORT}" 129 | 130 | apm-server: 131 | image: ${APM_SERVER_IMAGE_NAME}:${ELK_VERSION} 132 | build: 133 | context: apm-server/ 134 | args: 135 | ELK_VERSION: $ELK_VERSION 136 | restart: unless-stopped 137 | volumes: 138 | - ./apm-server/config/apm-server.yml:/usr/share/apm-server/apm-server.yml:ro 139 | environment: 140 | ELASTIC_USERNAME: ${ELASTIC_USERNAME} 141 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD} 142 | ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT} 143 | ELASTIC_APM_SECRET_TOKEN: ${ELASTIC_APM_SECRET_TOKEN} 144 | APMSERVER_PORT: ${APMSERVER_PORT} 145 | ports: 146 | - "${APMSERVER_PORT}:${APMSERVER_PORT}" 147 | secrets: 148 | - source: elastic.ca 149 | target: /certs/ca.crt 150 | - source: apm-server.certificate 151 | target: /certs/apm-server.crt 152 | - source: apm-server.key 153 | target: /certs/apm-server.key 154 | -------------------------------------------------------------------------------- /elasticsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/elasticsearch-docker 4 | FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION} 5 | 6 | # Add healthcheck 7 | COPY scripts/docker-healthcheck . 
8 | HEALTHCHECK CMD sh ./docker-healthcheck 9 | 10 | # Add your elasticsearch plugins setup here 11 | # Example: RUN elasticsearch-plugin install analysis-icu 12 | #RUN elasticsearch-plugin install --batch repository-s3 13 | -------------------------------------------------------------------------------- /elasticsearch/config/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | ## Default Elasticsearch configuration from Elasticsearch base image. 2 | ## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml 3 | # 4 | cluster.name: ${ELASTIC_CLUSTER_NAME} 5 | node.name: ${ELASTIC_NODE_NAME} 6 | network.host: 0.0.0.0 7 | transport.host: 0.0.0.0 8 | http.port: ${ELASTICSEARCH_PORT} 9 | 10 | ## Cluster Settings 11 | discovery.seed_hosts: ${ELASTIC_DISCOVERY_SEEDS} 12 | cluster.initial_master_nodes: ${ELASTIC_INIT_MASTER_NODE} 13 | 14 | ## License 15 | xpack.license.self_generated.type: basic 16 | 17 | # Security 18 | xpack.security.enabled: true 19 | 20 | ## - ssl 21 | xpack.security.transport.ssl.enabled: true 22 | xpack.security.transport.ssl.verification_mode: certificate 23 | xpack.security.transport.ssl.key: certs/elasticsearch.key 24 | xpack.security.transport.ssl.certificate: certs/elasticsearch.crt 25 | xpack.security.transport.ssl.certificate_authorities: certs/ca.crt 26 | 27 | ## - http 28 | xpack.security.http.ssl.enabled: true 29 | xpack.security.http.ssl.key: certs/elasticsearch.key 30 | xpack.security.http.ssl.certificate: certs/elasticsearch.crt 31 | xpack.security.http.ssl.certificate_authorities: certs/ca.crt 32 | xpack.security.http.ssl.client_authentication: optional 33 | 34 | # Monitoring 35 | xpack.monitoring.collection.enabled: true -------------------------------------------------------------------------------- /elasticsearch/config/log4j2.properties: -------------------------------------------------------------------------------- 1 | status = 
error 2 | 3 | # log action execution errors for easier debugging 4 | logger.action.name = org.elasticsearch.action 5 | logger.action.level = info 6 | 7 | appender.rolling.type = Console 8 | appender.rolling.name = rolling 9 | appender.rolling.layout.type = ESJsonLayout 10 | appender.rolling.layout.type_name = server 11 | 12 | rootLogger.level = info 13 | rootLogger.appenderRef.rolling.ref = rolling 14 | 15 | appender.deprecation_rolling.type = Console 16 | appender.deprecation_rolling.name = deprecation_rolling 17 | appender.deprecation_rolling.layout.type = ESJsonLayout 18 | appender.deprecation_rolling.layout.type_name = deprecation 19 | 20 | logger.deprecation.name = org.elasticsearch.deprecation 21 | logger.deprecation.level = warn 22 | logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling 23 | logger.deprecation.additivity = false 24 | 25 | appender.index_search_slowlog_rolling.type = Console 26 | appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling 27 | appender.index_search_slowlog_rolling.layout.type = ESJsonLayout 28 | appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog 29 | 30 | logger.index_search_slowlog_rolling.name = index.search.slowlog 31 | logger.index_search_slowlog_rolling.level = trace 32 | logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling 33 | logger.index_search_slowlog_rolling.additivity = false 34 | 35 | appender.index_indexing_slowlog_rolling.type = Console 36 | appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling 37 | appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout 38 | appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog 39 | 40 | logger.index_indexing_slowlog.name = index.indexing.slowlog.index 41 | logger.index_indexing_slowlog.level = trace 42 | logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = 
index_indexing_slowlog_rolling 43 | logger.index_indexing_slowlog.additivity = false 44 | 45 | appender.audit_rolling.type = Console 46 | appender.audit_rolling.name = audit_rolling 47 | appender.audit_rolling.layout.type = PatternLayout 48 | appender.audit_rolling.layout.pattern = {\ 49 | "type": "audit", \ 50 | "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ 51 | %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ 52 | %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ 53 | %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ 54 | %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ 55 | %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ 56 | %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ 57 | %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ 58 | %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ 59 | %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ 60 | %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ 61 | %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ 62 | %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ 63 | %varsNotEmpty{, "user.roles":%map{user.roles}}\ 64 | %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ 65 | %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ 66 | %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ 67 | %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ 68 | %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ 69 | %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ 70 | %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ 71 | %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ 72 | %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ 73 | %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ 74 | %varsNotEmpty{, "indices":%map{indices}}\ 75 | %varsNotEmpty{, 
"opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ 76 | %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ 77 | %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ 78 | %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ 79 | %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\ 80 | }%n 81 | # "node.name" node name from the `elasticsearch.yml` settings 82 | # "node.id" node id which should not change between cluster restarts 83 | # "host.name" unresolved hostname of the local node 84 | # "host.ip" the local bound ip (i.e. the ip listening for connections) 85 | # "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) 86 | # "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. 87 | # "user.name" the subject name as authenticated by a realm 88 | # "user.run_by.name" the original authenticated subject name that is impersonating another one. 89 | # "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. 90 | # "user.realm" the name of the realm that authenticated "user.name" 91 | # "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") 92 | # "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from 93 | # "user.roles" the roles array of the user; these are the roles that are granting privileges 94 | # "origin.type" it is "rest" if the event is originating (is in relation to) a REST request; possible other values are "transport" and "ip_filter" 95 | # "origin.address" the remote address and port of the first network hop, i.e. 
a REST proxy or another cluster node 96 | # "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated 97 | # "url.path" the URI component between the port and the query string; it is percent (URL) encoded 98 | # "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded 99 | # "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT 100 | # "request.body" the content of the request body entity, JSON escaped 101 | # "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request 102 | # "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) 103 | # "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) 104 | # "indices" the array of indices that the "action" is acting upon 105 | # "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header 106 | # "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) 107 | # "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event 108 | # "rule" name of the applied rule if the "origin.type" is "ip_filter" 109 | # "event.category" fixed value "elasticsearch-audit" 110 | 111 | logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail 112 | logger.xpack_security_audit_logfile.level = info 113 | logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling 114 | logger.xpack_security_audit_logfile.additivity = false 115 | 116 | logger.xmlsig.name = 
org.apache.xml.security.signature.XMLSignature 117 | logger.xmlsig.level = error 118 | logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter 119 | logger.samlxml_decrypt.level = fatal 120 | logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter 121 | logger.saml2_decrypt.level = fatal -------------------------------------------------------------------------------- /elasticsearch/scripts/docker-healthcheck: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | host="$(hostname --ip-address || echo '127.0.0.1')" 5 | 6 | if health="$(curl -fsSL "https://$ELASTIC_USERNAME:$ELASTIC_PASSWORD@$host:$ELASTICSEARCH_PORT/_cat/health?h=status" --insecure)"; then 7 | health="$(echo "$health" | sed -r 's/^[[:space:]]+|[[:space:]]+$//g')" # trim whitespace (otherwise we'll have "green ") 8 | if [ "$health" = 'green' ] || [ "$health" = "yellow" ]; then 9 | exit 0 10 | fi 11 | echo >&2 "unexpected health status: $health" 12 | fi 13 | 14 | exit 1 15 | -------------------------------------------------------------------------------- /filebeat/filebeat.docker.logs.yml: -------------------------------------------------------------------------------- 1 | #================================== Description ======================================== 2 | # Filebeat Config to send Elasticsearch/Logstash/Kibana in a docker host to Elasticsea- 3 | # rch cluster. 4 | 5 | name: filebeat-docker-logs-shipper 6 | 7 | filebeat.config: 8 | modules: 9 | path: ${path.config}/modules.d/*.yml 10 | reload.enabled: false 11 | 12 | #================================ Autodiscover ======================================= 13 | # Autodiscover all containers with elasticsearch images, and add a separate input for 14 | # each container and log type. 
15 | filebeat.autodiscover: 16 | providers: 17 | - type: docker 18 | templates: 19 | - condition: 20 | and: 21 | - not.contains: 22 | docker.container.image: elasticsearch 23 | - not.contains: 24 | docker.container.image: logstash 25 | - not.contains: 26 | docker.container.image: kibana 27 | config: 28 | - type: container 29 | paths: 30 | - /var/lib/docker/containers/${data.docker.container.id}/*.log 31 | 32 | processors: 33 | - add_cloud_metadata: ~ 34 | 35 | # Output to Logstash 36 | output.logstash: 37 | hosts: ["logstash:5044"] 38 | 39 | #=================================== Kibana ========================================== 40 | # Enable setting up Kibana 41 | # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. 42 | # This requires a Kibana endpoint configuration. 43 | setup: 44 | kibana: 45 | host: '${KIBANA_HOST_PORT}' 46 | username: '${ELASTIC_USERNAME}' 47 | password: '${ELASTIC_PASSWORD}' 48 | 49 | #==================================== Monitoring ===================================== 50 | # Enable Monitoring Beats 51 | # Filebeat can export internal metrics to a central Elasticsearch monitoring 52 | # cluster. This requires xpack monitoring to be enabled in Elasticsearch 53 | 54 | # Use deprecated option to avoid current UX bug in 7.3.0 where filebeat creates a 55 | # standalone monitoring cluster in the monitoring UI. 56 | # see: https://github.com/elastic/beats/pull/13182 57 | xpack.monitoring: 58 | enabled: true 59 | elasticsearch: 60 | hosts: '${ELASTICSEARCH_HOST_PORT}' 61 | username: '${ELASTIC_USERNAME}' 62 | password: '${ELASTIC_PASSWORD}' 63 | 64 | 65 | #monitoring: 66 | # enabled: true 67 | # elasticsearch: 68 | # hosts: '${ELASTICSEARCH_HOST_PORT}' 69 | # username: '${ELASTIC_USERNAME}' 70 | # password: '${ELASTIC_PASSWORD}' 71 | 72 | #================================ HTTP Endpoint ====================================== 73 | # Enabled so we can monitor filebeat using filebeat exporter if needed. 
74 | # Each beat can expose internal metrics through a HTTP endpoint. For security 75 | # reasons the endpoint is disabled by default. This feature is currently experimental. 76 | # Stats can be accessed through http://localhost:5066/stats . For pretty JSON output 77 | # append ?pretty to the URL. 78 | 79 | # Defines if the HTTP endpoint is enabled. 80 | http.enabled: true 81 | http.host: 0.0.0.0 82 | http.port: 5066 83 | -------------------------------------------------------------------------------- /filebeat/filebeat.monitoring.yml: -------------------------------------------------------------------------------- 1 | #================================== Description ======================================== 2 | # Filebeat Config to send Elasticsearch/Logstash/Kibana in a docker host to Elasticsea- 3 | # rch cluster. 4 | 5 | name: filebeat-elk-monitoring 6 | 7 | filebeat.config: 8 | modules: 9 | path: ${path.config}/modules.d/*.yml 10 | reload.enabled: false 11 | 12 | #================================ Autodiscover ======================================= 13 | # Autodiscover all containers with elasticsearch images, and add a separate input for 14 | # each container and log type. 
15 | filebeat.autodiscover: 16 | providers: 17 | - type: docker 18 | templates: 19 | - condition: 20 | contains: 21 | docker.container.image: elasticsearch 22 | config: 23 | - module: elasticsearch 24 | server: 25 | input: 26 | type: container 27 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 28 | gc: 29 | input: 30 | type: container 31 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 32 | audit: 33 | input: 34 | type: container 35 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 36 | slowlog: 37 | input: 38 | type: container 39 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 40 | deprecation: 41 | input: 42 | type: container 43 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 44 | - type: docker 45 | templates: 46 | - condition: 47 | contains: 48 | docker.container.image: kibana 49 | config: 50 | - module: kibana 51 | log: 52 | input: 53 | type: container 54 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 55 | - type: docker 56 | templates: 57 | - condition: 58 | contains: 59 | docker.container.image: logstash 60 | config: 61 | - module: logstash 62 | log: 63 | input: 64 | type: container 65 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 66 | slowlog: 67 | input: 68 | type: container 69 | paths: '/var/lib/docker/containers/${data.docker.container.id}/*.log' 70 | 71 | 72 | processors: 73 | - add_cloud_metadata: ~ 74 | 75 | # Output to ES directly. 76 | output.elasticsearch: 77 | hosts: '${ELASTICSEARCH_HOST_PORT}' 78 | username: '${ELASTIC_USERNAME}' 79 | password: '${ELASTIC_PASSWORD}' 80 | ssl: 81 | verification_mode: "none" 82 | 83 | 84 | #=================================== Kibana ========================================== 85 | # Enable setting up Kibana 86 | # Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. 87 | # This requires a Kibana endpoint configuration. 
88 | setup: 89 | kibana: 90 | host: '${KIBANA_HOST_PORT}' 91 | username: '${ELASTIC_USERNAME}' 92 | password: '${ELASTIC_PASSWORD}' 93 | 94 | #==================================== Monitoring ===================================== 95 | # Enable Monitoring Beats 96 | # Filebeat can export internal metrics to a central Elasticsearch monitoring 97 | # cluster. This requires xpack monitoring to be enabled in Elasticsearch 98 | 99 | # Use deprecated option to avoid current UX bug in 7.3.0 where filebeat creates a 100 | # standalone monitoring cluster in the monitoring UI. 101 | # see: https://github.com/elastic/beats/pull/13182 102 | xpack.monitoring: 103 | enabled: true 104 | # elasticsearch: 105 | # hosts: '${ELASTICSEARCH_HOST_PORT}' 106 | # username: '${ELASTIC_USERNAME}' 107 | # password: '${ELASTIC_PASSWORD}' 108 | 109 | #monitoring: 110 | # enabled: true 111 | # elasticsearch: 112 | # hosts: '${ELASTICSEARCH_HOST_PORT}' 113 | # username: '${ELASTIC_USERNAME}' 114 | # password: '${ELASTIC_PASSWORD}' 115 | # ssl.enabled: true 116 | # ssl.verification_mode: none 117 | 118 | #================================ HTTP Endpoint ====================================== 119 | # Enabled so we can monitor filebeat using filebeat exporter if needed. 120 | # Each beat can expose internal metrics through a HTTP endpoint. For security 121 | # reasons the endpoint is disabled by default. This feature is currently experimental. 122 | # Stats can be accessed through http://localhost:5066/stats . For pretty JSON output 123 | # append ?pretty to the URL. 124 | 125 | # Defines if the HTTP endpoint is enabled. 
126 | http.enabled: true 127 | http.host: 0.0.0.0 128 | http.port: 5066 129 | -------------------------------------------------------------------------------- /kibana/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/kibana-docker 4 | FROM docker.elastic.co/kibana/kibana:${ELK_VERSION} 5 | ARG ELK_VERSION 6 | 7 | # Add your kibana plugins setup here 8 | # Example: RUN kibana-plugin install -------------------------------------------------------------------------------- /kibana/config/kibana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Default Kibana configuration from Kibana base image. 3 | ## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js 4 | # 5 | server.name: kibana 6 | server.host: "0.0.0.0" 7 | server.port: ${KIBANA_PORT} 8 | 9 | # Elasticsearch Connection 10 | elasticsearch.hosts: [ "${ELASTICSEARCH_HOST_PORT}" ] 11 | 12 | # SSL settings 13 | server.ssl.enabled: true 14 | server.ssl.certificate: /certs/kibana.crt 15 | server.ssl.key: /certs/kibana.key 16 | server.ssl.certificateAuthorities: [ "/certs/ca.crt" ] 17 | xpack.security.encryptionKey: C1tHnfrlfxSPxPlQ8BlgPB5qMNRtg5V5 18 | xpack.encryptedSavedObjects.encryptionKey: D12GTfrlfxSPxPlGRBlgPB5qM5GOPDV5 19 | xpack.reporting.encryptionKey: RSCueeHKzrqzOVTJhkjt17EMnzM96LlN 20 | 21 | ## X-Pack security credentials 22 | elasticsearch.serviceAccountToken: "${KIBANA_SERVICE_ACCOUNT_TOKEN}" 23 | elasticsearch.ssl.certificateAuthorities: [ "/certs/ca.crt" ] 24 | 25 | ## Add policy for apm-server integration 26 | xpack.fleet.packages: 27 | - name: apm 28 | version: latest 29 | xpack.fleet.agentPolicies: 30 | - name: Agent policy 1 31 | id: agent-policy-1 32 | namespace: default 33 | monitoring_enabled: 34 | - logs 35 | - metrics 36 | package_policies: 37 | - name: apm-1 38 | id: 
default-apm 39 | package: 40 | name: apm 41 | 42 | ## Misc 43 | elasticsearch.requestTimeout: 90000 44 | 45 | 46 | 47 | ## ElastAlert Plugin 48 | #elastalert-kibana-plugin.serverHost: elastalert 49 | #elastalert-kibana-plugin.serverPort: 3030 50 | -------------------------------------------------------------------------------- /logstash/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELK_VERSION 2 | 3 | # https://github.com/elastic/logstash-docker 4 | FROM docker.elastic.co/logstash/logstash:${ELK_VERSION} 5 | 6 | HEALTHCHECK --interval=240s --timeout=120s --retries=5 \ 7 | CMD curl -s -XGET 'http://127.0.0.1:9600' 8 | 9 | # Add your logstash plugins setup here 10 | # Example: RUN logstash-plugin install logstash-filter-json 11 | -------------------------------------------------------------------------------- /logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http.host: "0.0.0.0" 3 | 4 | ## X-Pack security credentials 5 | xpack.monitoring.elasticsearch.hosts: ${ELASTICSEARCH_HOST_PORT} 6 | xpack.monitoring.enabled: true 7 | xpack.monitoring.elasticsearch.username: ${ELASTIC_USERNAME} 8 | xpack.monitoring.elasticsearch.password: ${ELASTIC_PASSWORD} 9 | xpack.monitoring.elasticsearch.ssl.certificate_authority: /certs/ca.crt -------------------------------------------------------------------------------- /logstash/config/pipelines.yml: -------------------------------------------------------------------------------- 1 | # For per pipeline config, check docs: https://www.elastic.co/guide/en/logstash/current/logstash-settings-file.html 2 | 3 | - pipeline.id: main 4 | path.config: "/usr/share/logstash/pipeline/main.conf" 5 | queue.type: memory 6 | 7 | #- pipeline.id: second_pipeline 8 | # path.config: "/usr/share/logstash/pipeline/second.conf" 9 | # queue.type: persisted 10 | # pipeline.batch.size: 125 11 | # queue.page_capacity: 50mb 
-------------------------------------------------------------------------------- /logstash/pipeline/main.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | } 6 | 7 | filter { 8 | 9 | } 10 | 11 | output { 12 | elasticsearch { 13 | hosts => "${ELASTICSEARCH_HOST_PORT}" 14 | user => "${ELASTIC_USERNAME}" 15 | password => "${ELASTIC_PASSWORD}" 16 | ssl => true 17 | ssl_certificate_verification => false 18 | cacert => "/certs/ca.crt" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /secrets/certs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sherifabdlnaby/elastdocker/64326a5b6fa86fdfdde40457adcd8bd599217e8c/secrets/certs/.gitkeep -------------------------------------------------------------------------------- /secrets/keystore/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sherifabdlnaby/elastdocker/64326a5b6fa86fdfdde40457adcd8bd599217e8c/secrets/keystore/.gitkeep -------------------------------------------------------------------------------- /setup/instances.yml: -------------------------------------------------------------------------------- 1 | instances: 2 | - name: elasticsearch 3 | dns: 4 | - elasticsearch 5 | - localhost 6 | ip: 7 | - 127.0.0.1 8 | 9 | - name: kibana 10 | dns: 11 | - kibana 12 | - localhost 13 | ip: 14 | - 127.0.0.1 15 | 16 | - name: apm-server 17 | dns: 18 | - apm-server 19 | - localhost 20 | ip: 21 | - 127.0.0.1 22 | -------------------------------------------------------------------------------- /setup/keystore.sh: -------------------------------------------------------------------------------- 1 | # Exit on Error 2 | set -e 3 | 4 | # Setting Bootstrap Password 5 | echo "Setting bootstrap.password..." 
6 | (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password') 7 | 8 | # ----- Setting Secrets 9 | 10 | ## Add Additional Config 11 | # 1- Copy the below commented block, uncomment it, and replace , , and . 12 | # 2- Pass to setup container in `docker-compose-setup.yml` 13 | 14 | ## Setting 15 | #echo "Setting ..." 16 | #(echo "$" | elasticsearch-keystore add -x '') 17 | 18 | 19 | # ----- Setting S3 Secrets 20 | 21 | ## Setting S3 Access Key 22 | #echo "Setting S3 Access Key..." 23 | #(echo "$AWS_ACCESS_KEY_ID" | elasticsearch-keystore add -x 's3.client.default.access_key') 24 | # 25 | ## Setting S3 Secret Key 26 | #echo "Setting S3 Secret Key..." 27 | #(echo "$AWS_SECRET_ACCESS_KEY" | elasticsearch-keystore add -x 's3.client.default.secret_key') -------------------------------------------------------------------------------- /setup/setup-certs.sh: -------------------------------------------------------------------------------- 1 | # Exit on Error 2 | set -e 3 | 4 | OUTPUT_DIR=/secrets/certs 5 | ZIP_CA_FILE=$OUTPUT_DIR/ca.zip 6 | ZIP_FILE=$OUTPUT_DIR/certs.zip 7 | 8 | printf "======= Generating Elastic Stack Certificates =======\n" 9 | printf "=====================================================\n" 10 | 11 | if ! command -v unzip &>/dev/null; then 12 | printf "Installing Necessary Tools... \n" 13 | yum install -y -q -e 0 unzip; 14 | fi 15 | 16 | printf "Clearing Old Certificates if exits... \n" 17 | mkdir -p $OUTPUT_DIR 18 | find $OUTPUT_DIR -type d -exec rm -rf -- {} + 19 | mkdir -p $OUTPUT_DIR/ca 20 | 21 | 22 | printf "Generating CA Certificates... \n" 23 | PASSWORD=`openssl rand -base64 32` 24 | /usr/share/elasticsearch/bin/elasticsearch-certutil ca --pass "$PASSWORD" --pem --out $ZIP_CA_FILE &> /dev/null 25 | printf "Generating Certificates... 
\n" 26 | unzip -qq $ZIP_CA_FILE -d $OUTPUT_DIR; 27 | /usr/share/elasticsearch/bin/elasticsearch-certutil cert --silent --pem --ca-cert $OUTPUT_DIR/ca/ca.crt --ca-key $OUTPUT_DIR/ca/ca.key --ca-pass "$PASSWORD" --in /setup/instances.yml -out $ZIP_FILE &> /dev/null 28 | 29 | printf "Unzipping Certifications... \n" 30 | unzip -qq $ZIP_FILE -d $OUTPUT_DIR; 31 | 32 | printf "Applying Permissions... \n" 33 | chown -R 1000:0 $OUTPUT_DIR 34 | find $OUTPUT_DIR -type f -exec chmod 655 -- {} + 35 | 36 | printf "=====================================================\n" 37 | printf "SSL Certifications generation completed successfully.\n" 38 | printf "=====================================================\n" 39 | -------------------------------------------------------------------------------- /setup/setup-keystore.sh: -------------------------------------------------------------------------------- 1 | # Exit on Error 2 | set -e 3 | 4 | GENERATED_KEYSTORE=/usr/share/elasticsearch/config/elasticsearch.keystore 5 | OUTPUT_KEYSTORE=/secrets/keystore/elasticsearch.keystore 6 | 7 | GENERATED_SERVICE_TOKENS=/usr/share/elasticsearch/config/service_tokens 8 | OUTPUT_SERVICE_TOKENS=/secrets/service_tokens 9 | OUTPUT_KIBANA_TOKEN=/secrets/.env.kibana.token 10 | 11 | # Password Generate 12 | PW=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 16 ;) 13 | ELASTIC_PASSWORD="${ELASTIC_PASSWORD:-$PW}" 14 | export ELASTIC_PASSWORD 15 | 16 | # Create Keystore 17 | printf "========== Creating Elasticsearch Keystore ==========\n" 18 | printf "=====================================================\n" 19 | elasticsearch-keystore create >> /dev/null 20 | 21 | # Setting Secrets and Bootstrap Password 22 | sh /setup/keystore.sh 23 | echo "Elastic Bootstrap Password is: $ELASTIC_PASSWORD" 24 | 25 | # Generating Kibana Token 26 | echo "Generating Kibana Service Token..." 
27 | 28 | # Delete old token if exists 29 | /usr/share/elasticsearch/bin/elasticsearch-service-tokens delete elastic/kibana default &> /dev/null || true 30 | 31 | # Generate new token 32 | TOKEN=$(/usr/share/elasticsearch/bin/elasticsearch-service-tokens create elastic/kibana default | cut -d '=' -f2 | tr -d ' ') 33 | echo "Kibana Service Token is: $TOKEN" 34 | echo "KIBANA_SERVICE_ACCOUNT_TOKEN=$TOKEN" > $OUTPUT_KIBANA_TOKEN 35 | 36 | # Replace current Keystore 37 | if [ -f "$OUTPUT_KEYSTORE" ]; then 38 | echo "Remove old elasticsearch.keystore" 39 | rm $OUTPUT_KEYSTORE 40 | fi 41 | 42 | echo "Saving new elasticsearch.keystore" 43 | mkdir -p "$(dirname $OUTPUT_KEYSTORE)" 44 | mv $GENERATED_KEYSTORE $OUTPUT_KEYSTORE 45 | chmod 0644 $OUTPUT_KEYSTORE 46 | 47 | # Replace current Service Tokens File 48 | if [ -f "$OUTPUT_SERVICE_TOKENS" ]; then 49 | echo "Remove old service_tokens file" 50 | rm $OUTPUT_SERVICE_TOKENS 51 | fi 52 | 53 | echo "Saving new service_tokens file" 54 | mv $GENERATED_SERVICE_TOKENS $OUTPUT_SERVICE_TOKENS 55 | chmod 0644 $OUTPUT_SERVICE_TOKENS 56 | 57 | printf "======= Keystore setup completed successfully =======\n" 58 | printf "=====================================================\n" 59 | printf "Remember to restart the stack, or reload secure settings if changed settings are hot-reloadable.\n" 60 | printf "About Reloading Settings: https://www.elastic.co/guide/en/elasticsearch/reference/current/secure-settings.html#reloadable-secure-settings\n" 61 | printf "=====================================================\n" 62 | printf "Your 'elastic' user password is: $ELASTIC_PASSWORD\n" 63 | printf "Your Kibana Service Token is: $TOKEN\n" 64 | printf "=====================================================\n" 65 | -------------------------------------------------------------------------------- /setup/upgrade-keystore.sh: -------------------------------------------------------------------------------- 1 | # Exit on Error 2 | set -e 3 | 4 | 
KEYSTORE_TO_UPGRADE=/secrets/keystore/elasticsearch.keystore 5 | KEYSTORE_TO_UPGRADE_BACKUP=$KEYSTORE_TO_UPGRADE.pre-upgrade 6 | KEYSTORE_LOCATION_FOR_TOOL=/usr/share/elasticsearch/config/elasticsearch.keystore 7 | 8 | if [ -f $KEYSTORE_TO_UPGRADE_BACKUP ]; then 9 | echo "A backup of a previous run of this script was found at $KEYSTORE_TO_UPGRADE_BACKUP. Aborting execution!" 10 | echo "Please remove the backup file and run the script again if you're sure that you want to run the upgrade script again." 11 | exit 1 12 | fi 13 | 14 | echo "=========== Upgrading Elasticsearch Keystore ==========" 15 | 16 | cp $KEYSTORE_TO_UPGRADE $KEYSTORE_LOCATION_FOR_TOOL 17 | 18 | echo "Running elasticsearch-keystore upgrade" 19 | elasticsearch-keystore upgrade 20 | 21 | mv $KEYSTORE_TO_UPGRADE $KEYSTORE_TO_UPGRADE_BACKUP 22 | mv $KEYSTORE_LOCATION_FOR_TOOL $KEYSTORE_TO_UPGRADE 23 | 24 | echo "======= Keystore upgrade completed successfully =======" 25 | echo "Old keystore was backed up to $KEYSTORE_TO_UPGRADE_BACKUP" 26 | --------------------------------------------------------------------------------