├── .devcontainer └── python_development │ └── devcontainer.json ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug-or-error-report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md ├── README-github.md ├── changelog-configuration.json └── workflows │ ├── build_release.yaml │ ├── burndown_chart.yml │ ├── cluster.yml │ ├── linux_only.yml │ ├── main.yml │ └── upgrade.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── RELEASES.md ├── ansible ├── install_lme_local.yml ├── post_install_local.yml └── set_fleet.yml ├── build ├── Readme.md ├── emoji-filter.lua ├── includes.txt ├── makerelativepaths.lua ├── parse_breaks.lua └── setup.tex ├── config ├── containers.conf ├── containers.txt ├── elastalert2 │ ├── config.yaml │ ├── misc │ │ └── smtp_auth.yml │ └── rules │ │ ├── example-email-rule.yml │ │ └── windows_event_logs_cleared.yaml ├── elasticsearch.yml ├── example.env ├── kibana.yml ├── setup │ ├── acct-init.sh │ ├── init-setup.sh │ └── instances.yml └── wazuh_cluster │ └── wazuh_manager.conf ├── dashboards ├── Readme.md ├── elastic │ ├── alerting_dashboard_2_0.ndjson │ ├── computer_software_overview_2_0.ndjson │ ├── credential_access_logs_dashboard_2_0.ndjson │ ├── healthcheck_dashboard_overview_2_0.ndjson │ ├── identity_access_management_2_0.ndjson │ ├── policy_changes_and_system_activity_2_0.ndjson │ ├── privileged_activity_log_dashboards_2_0.ndjson │ ├── process_explorer_2_0.ndjson │ ├── security_dashboard_security_log_2_0.ndjson │ ├── sysmon_summary_2_0.ndjson │ ├── user_hr_2_0.ndjson │ └── user_security_2_0.ndjson ├── export_dashboards.py ├── requirements.txt └── wazuh │ ├── wazuh_incident_response.dumped.ndjson │ ├── wazuh_malware_detection.dumped.ndjson │ ├── wazuh_security_events.dumped.ndjson │ └── wazuh_vulnerabilities.dumped.ndjson ├── docker ├── 22.04 │ ├── Dockerfile │ ├── check-lme-setup.ps1 │ ├── check-lme-setup.sh │ ├── docker-compose.yml │ ├── environment_example.sh │ ├── lme-init.sh │ └── lme-setup.service ├── 24.04 │ ├── Dockerfile │ ├── check-lme-setup.ps1 │ ├── check-lme-setup.sh │ ├── docker-compose.yml │ ├── environment.sh │ ├── environment_example.sh │ ├── lme-init.sh │ └── lme-setup.service ├── README.md └── install_latest_docker_in_ubuntu.sh ├── docs ├── imgs │ ├── AdjustForwardedEventsLogSize.png │ ├── OverviewDiagram.png │ ├── add-exceptions.png │ ├── alert-enable-menu.png │ ├── backup_pics │ │ ├── policy_1.png │ │ ├── policy_2.png │ │ ├── policy_3.png │ │ ├── policy_4.png │ │ ├── policy_5.png │ │ ├── policy_6.png │ │ ├── repository_1.png │ │ ├── repository_2.png │ │ ├── repository_3.png │ │ └── snapshot_and_restore.png │ ├── chapter_overview.jpg │ ├── cisa.png │ ├── close-index.png │ ├── createindex.jpg │ ├── dashboard.jpg │ ├── dashboard │ │ ├── app_password.png │ │ ├── dataview-create.png │ │ ├── delete-import-dashboards.png │ │ ├── discover-pivot-1.png │ │ ├── discover-pivot-2.png │ │ ├── elastalert-dataview.png │ │ ├── lme-dashboards-list.png │ │ └── wazuh-dashboards-list.png │ ├── default-index-pattern.png │ ├── default-winlogbeat.png │ ├── delete-indices.jpg │ ├── delete-originals.png │ ├── dev-tools.jpg │ ├── discover_tab.jpg │ ├── duplicate-indices.jpg │ ├── edit-update-script.png │ ├── elkstack.jpg │ ├── error.png │ ├── event_viewer_prompt.png │ ├── eventforwarding_overview.jpg │ ├── eventviewer.jpg │ ├── example-exception.png │ ├── exceptions.png │ ├── extra_beats_pics │ │ ├── deletion-enable.png │ │ ├── filebeat-selection.png │ │ ├── filebeat.png │ │ ├── ilm.png │ │ ├── logstash-writer.png │ │ ├── roles.png │ │ ├── 
stack-management.png │ │ ├── update-retention.png │ │ └── update-role.png │ ├── firstload.jpg │ ├── fleetservermissingurl.png │ ├── git-flow.png │ ├── gpo.jpg │ ├── gpo_pics │ │ ├── aduc.jpg │ │ ├── create_new_object.jpg │ │ ├── gpmc.jpg │ │ ├── import_done.jpg │ │ ├── import_new_object.jpg │ │ ├── link_an_ou.jpg │ │ ├── name_new_object.jpg │ │ ├── new_ou.jpg │ │ ├── optional_features.png │ │ ├── rsat_gpmc_optional_features.png │ │ ├── select_backup.jpg │ │ └── select_gpo_link.jpg │ ├── gpoedit.jpg │ ├── healthcheckstatus.jpg │ ├── import.jpg │ ├── import.png │ ├── import1.png │ ├── import2.png │ ├── index-patterns.png │ ├── index-selection.png │ ├── insecure-powershell.png │ ├── lme-architecture-v2.jpg │ ├── lme-architecture-v2.png │ ├── lme-cloud.jpg │ ├── lme-image.png │ ├── logistics.png │ ├── nav-bar.png │ ├── re-index-script.jpg │ ├── restore-details.png │ ├── restore.png │ ├── retention_pics │ │ └── retention_1.png │ ├── rules_error.png │ ├── select-rule.png │ ├── siem.png │ ├── siem1.png │ ├── siem2.png │ ├── siem3.png │ ├── siem4.png │ ├── siem5.png │ ├── siem6.png │ ├── snap-restore.png │ ├── stack-management.jpg │ ├── stack-management.png │ ├── sysmon-task-properties.png │ ├── sysvol.jpg │ ├── task-complete.png │ ├── task-status.png │ ├── task.png │ ├── testing-screenshots │ │ ├── delete.png │ │ ├── shell.png │ │ ├── shell2.png │ │ ├── shell3.png │ │ ├── shell4.png │ │ └── shell5.png │ ├── timefilter.jpg │ ├── timerange.jpg │ ├── timerange.png │ ├── troubleshooting-overview.jpg │ ├── update-rules.png │ ├── usersec.png │ ├── verify.png │ ├── winlogbeat-install.png │ ├── winlogbeat-location.png │ ├── winlogbeat-running.png │ └── winscp.jpg └── markdown │ ├── agents │ ├── elastic-agent-management.md │ ├── wazuh-active-response.md │ └── wazuh-agent-management.md │ ├── endpoint-tools │ ├── install-auditd.md │ └── install-sysmon.md │ ├── logging-guidance │ ├── cloud.md │ ├── filtering.md │ └── retention.md │ ├── maintenance │ ├── Encryption_at_rest_option_for_users.md │ ├── backups.md │ ├── certificates.md │ ├── elastalert-rules.md │ ├── index-management.md │ ├── upgrading.md │ ├── volume-management.md │ ├── vulnerability-scan-setup.md │ └── wazuh-configuration.md │ ├── prerequisites.md │ └── reference │ ├── architecture.md │ ├── configuration.md │ ├── dashboard-descriptions.md │ ├── faq.md │ ├── passwords.md │ ├── security-model.md │ └── troubleshooting.md ├── quadlet ├── lme-backups.volume ├── lme-elastalert.container ├── lme-elasticsearch.container ├── lme-esdata01.volume ├── lme-fleet-server.container ├── lme-kibana.container ├── lme-kibanadata.volume ├── lme-setup-accts.container ├── lme-setup-certs.container ├── lme-wazuh-manager.container ├── lme.network └── lme.service └── scripts ├── check_fleet_api.sh ├── check_password.sh ├── download.sh ├── extract_secrets.sh ├── gen_cert.sh ├── install_sysmon.ps1 ├── link_latest_podman_quadlet.sh ├── password_management.sh ├── set-fleet.sh ├── set_sysctl_limits.sh ├── set_vault_key_env.sh ├── upgrade ├── README.md ├── export_1x.sh ├── export_dashboards.py ├── fix_dashboard_titles.sh ├── import_1x.sh ├── import_dashboards.sh ├── remove_volumes.sh ├── requirements.txt └── uninstall_docker.sh ├── upload.sh └── wazuh_rbac.sh /.devcontainer/python_development/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python Development", 3 | "dockerComposeFile": [ 4 | "../../testing/v2/development/docker-compose.yml" 5 | ], 6 | "service": "ubuntu", 7 | "shutdownAction": "none", 8 | 
"workspaceFolder": "/root/LME", 9 | "customizations": { 10 | "vscode": { 11 | "extensions": [ 12 | "ms-python.python", 13 | "littlefoxteam.vscode-python-test-adapter", 14 | "ms-python.black-formatter" 15 | ] 16 | } 17 | }, 18 | "remoteUser": "root" 19 | } -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | testing export-ignore 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-or-error-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug or Error report 3 | about: Report issues, mistakes, unsolvable, or unresolved errors to help improve the project 4 | title: "[BUG] ERROR YYYYY in step X.X" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## **BEFORE CREATING THE ISSUE, CHECK THE FOLLOWING GUIDES**: 11 | - [ ] [FAQ](https://github.com/cisagov/LME/blob/main/docs/markdown/reference/faq.md) 12 | - [ ] [Troubleshooting](https://github.com/cisagov/LME/blob/main/docs/markdown/reference/troubleshooting.md) 13 | - [ ] Search current/closed issues for similar questions, and utilize github/google search to see if an answer exists for the error I'm encountering. 14 | 15 | If the above did not answer your question, proceed with creating an issue below: 16 | 17 | ## Describe the bug 18 | 19 | 20 | ## To Reproduce 21 | 22 | 23 | 24 | ### Please complete the following information 25 | #### **Desktop:** 26 | - OS: [e.g. Windows 10] 27 | - Browser: [e.g. Firefox Version 104.0.1] 28 | - Software version: [e.g. Sysmon v15.0, Winlogbeat 8.11.1] 29 | 30 | #### **Server:** 31 | - OS: [e.g. Ubuntu 22.04] 32 | - Software Versions: 33 | - ELK: [e.g. 8.7.1] 34 | - Docker: [e.g. 20.10.23, build 7155243] 35 | 36 | **OPTIONAL**: 37 | - The output of these commands: 38 | ``` 39 | free -h 40 | df -h 41 | uname -a 42 | lsb_release -a 43 | ``` 44 | - Relevant container logs: 45 | ``` 46 | for name in $(sudo docker ps -a --format '{{.Names}}'); do echo -e "\n\n\n-----------$name----------"; sudo docker logs $name | tail -n 20; done 47 | ``` 48 | Increase the number of lines if your issue is not present, or include a relevant log of the erroring container 49 | - Output of the relevant /var/log/cron_logs/ file 50 | 51 | ## Expected behavior 52 | A clear and concise description of what you expected to happen. 53 | 54 | ## Screenshots **OPTIONAL** 55 | If applicable, add screenshots to help explain your problem. 56 | 57 | ## Additional context 58 | Add any other context about the problem here. 59 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | ## 🗣 Description ## 3 | 4 | 5 | 6 | ### 💭 Motivation and context 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | ### 📷 Screenshots (DELETE IF UNAPPLICABLE) 17 | 18 | ## 🧪 Testing 19 | 20 | 21 | 22 | 23 | 24 | ## ✅ Pre-approval checklist ## 25 | 26 | - [ ] Changes are limited to a single goal **AND** 27 | the title reflects this in a clear human readable format 28 | - [ ] Issue that this PR solves has been selected in the Development section 29 | - [ ] I have read and agree to LME's [CONTRIBUTING.md](https://github.com/cisagov/LME/CONTRIBUTING.md) document. 30 | - [ ] The PR adheres to LME's requirements in [RELEASES.md](https://github.com/cisagov/LME/RELEASES.md#steps-to-submit-a-PR) 31 | - [ ] These code changes follow [cisagov code standards](https://github.com/cisagov/development-guide). 32 | - [ ] All relevant repo and/or project documentation has been updated to reflect the changes in this PR. 33 | 34 | ## ✅ Pre-merge Checklist 35 | 36 | - [ ] All tests pass 37 | - [ ] PR has been tested and the documentation for testing is above 38 | - [ ] Squash and merge all commits into one PR level commit 39 | 40 | ## ✅ Post-merge Checklist 41 | 42 | - [ ] Delete the branch to keep down number of branches 43 | 44 | -------------------------------------------------------------------------------- /.github/README-github.md: -------------------------------------------------------------------------------- 1 | See the readme in `testing/development` for more information about these workflows and how to develop for them. -------------------------------------------------------------------------------- /.github/changelog-configuration.json: -------------------------------------------------------------------------------- 1 | { 2 | "categories": [ 3 | { 4 | "title": "## What's Added", 5 | "labels": ["feat"], 6 | }, 7 | { 8 | "title": "## What's Fixed", 9 | "labels": ["fix"], 10 | }, 11 | { 12 | "title": "## What's Updated", 13 | "labels": ["update"], 14 | }, 15 | { 16 | "title": "## Uncategorized", 17 | "labels": [], 18 | }, 19 | ], 20 | "template": "#{{CHANGELOG}}", 21 | "pr_template": "* #{{TITLE}} by @#{{AUTHOR}} in ##{{NUMBER}}" 22 | } 23 | -------------------------------------------------------------------------------- /.github/workflows/build_release.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_dispatch: 3 | inputs: 4 | version: 5 | description: "Release version (e.g., 1.1.0)" 6 | required: true 7 | type: string 8 | 9 | name: Build Release 10 | 11 | permissions: 12 | contents: write 13 | 14 | jobs: 15 | build-release: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v4 20 | 21 | - name: Get current date 22 | id: date 23 | run: | 24 | echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_ENV 25 | 26 | - name: Build Assets 27 | run: git ls-files | zip LME-${{ inputs.version }}.zip -@ 28 | 29 | - name: Create Draft Release 30 | uses: softprops/action-gh-release@v0.1.15 31 | with: 32 | name: LME v${{ inputs.version }} 33 | tag_name: v${{ inputs.version }} 34 | body: | 35 | ## [${{ inputs.version }}] - Timberrrrr! 
- ${{ env.date }} 36 | append_body: true 37 | files: LME-${{ inputs.version }}.zip 38 | draft: true 39 | prerelease: false 40 | discussion_category_name: "Announcements" 41 | generate_release_notes: true 42 | fail_on_unmatched_files: true 43 | -------------------------------------------------------------------------------- /.github/workflows/burndown_chart.yml: -------------------------------------------------------------------------------- 1 | name: Burndown Chart 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | start_date: 7 | description: 'Sprint start date (YYYY-MM-DD)' 8 | required: true 9 | default: '2024-05-09' 10 | type: string 11 | end_date: 12 | description: 'Sprint end date (YYYY-MM-DD)' 13 | required: true 14 | default: '2024-05-25' 15 | type: string 16 | view: 17 | description: 'View number' 18 | required: true 19 | default: '1' 20 | type: string 21 | pull_request: 22 | branches: 23 | - '*' 24 | 25 | jobs: 26 | create_chart: 27 | runs-on: ubuntu-latest 28 | env: 29 | UNIQUE_ID: 30 | start_date: 31 | end_date: 32 | view: 33 | 34 | steps: 35 | - name: Checkout repository 36 | uses: actions/checkout@v4.1.1 37 | 38 | - name: Setup environment variables 39 | run: | 40 | echo "UNIQUE_ID=$(openssl rand -hex 3 | head -c 6)" >> $GITHUB_ENV 41 | 42 | - name: Set default dates 43 | if: github.event_name == 'pull_request' 44 | run: | 45 | echo "start_date=2024-05-09" >> $GITHUB_ENV 46 | echo "end_date=2024-05-25" >> $GITHUB_ENV 47 | echo "view=1" >> $GITHUB_ENV 48 | 49 | - name: Use dispatch inputs 50 | if: github.event_name == 'workflow_dispatch' 51 | run: | 52 | echo "start_date=${{ github.event.inputs.start_date }}" >> $GITHUB_ENV 53 | echo "end_date=${{ github.event.inputs.end_date }}" >> $GITHUB_ENV 54 | echo "view=${{ github.event.inputs.view }}" >> $GITHUB_ENV 55 | 56 | - name: Run Docker Build 57 | run: docker compose -p ${{ env.UNIQUE_ID }} -f testing/project_management/docker-compose.yml build burndown --no-cache 58 | 59 | - name: Run Docker Compose 60 | env: 61 | BURNDOWN_TOKEN: ${{ secrets.BURNDOWN_TOKEN }} 62 | run: docker compose -p ${{ env.UNIQUE_ID }} -f testing/project_management/docker-compose.yml up -d 63 | 64 | - name: List docker containers to wait for them to start 65 | run: | 66 | docker ps 67 | 68 | - name: Set up the burndown chart config 69 | env: 70 | BURNDOWN_TOKEN: ${{ secrets.BURNDOWN_TOKEN }} 71 | UNIQUE_ID: ${{ env.UNIQUE_ID }} 72 | START_DATE: ${{ env.start_date }} 73 | END_DATE: ${{ env.end_date }} 74 | VIEW: ${{ env.view }} 75 | run: | 76 | cd testing/project_management 77 | docker compose -p ${{ env.UNIQUE_ID }} exec -T burndown bash -c ' 78 | /lme/testing/project_management/setup_config.sh -s ${{ env.START_DATE }} -e ${{ env.END_DATE }} -v ${{ env.VIEW }} -f /github-projects-burndown-chart/src/github_projects_burndown_chart/config/config.json 79 | sed -i "s/\"github_token\": \"\"/\"github_token\": \"$BURNDOWN_TOKEN\"/g" /github-projects-burndown-chart/src/github_projects_burndown_chart/config/secrets.json 80 | cat /github-projects-burndown-chart/src/github_projects_burndown_chart/config/config.json 81 | ' 82 | 83 | - name: Run the burndown chart script 84 | run: | 85 | cd testing/project_management 86 | docker compose -p ${{ env.UNIQUE_ID }} exec -T burndown bash -c ' 87 | python3 /github-projects-burndown-chart/src/github_projects_burndown_chart/main.py organization LME --filepath /lme/burndown.png 88 | ' 89 | - name: Upload chart artifact 90 | uses: actions/upload-artifact@v4 91 | with: 92 | name: burndown 93 | path: burndown.png 94 | 95 | - name: 
Cleanup Docker Compose 96 | if: always() 97 | run: | 98 | cd testing/project_management 99 | docker compose -p ${{ env.UNIQUE_ID }} down 100 | # docker system prune -a --force -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_dispatch: 3 | push: 4 | branches: 5 | - main 6 | tags: 7 | - 'v[0-9]+.[0-9]+.[0-9]+*' # match basic semver tags 8 | pull_request: 9 | branches: 10 | - main 11 | - 'release-*' 12 | 13 | jobs: 14 | lint: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | 20 | - name: Lint Shell Scripts 21 | continue-on-error: true 22 | run: | 23 | sudo apt-get update 24 | sudo apt-get install shellcheck 25 | shellcheck **/*.sh 26 | 27 | - name: Lint PowerShell Scripts 28 | continue-on-error: true 29 | run: | 30 | pwsh -Command "Invoke-ScriptAnalyzer -EnableExit -Recurse -Path ." 31 | 32 | - name: Lint Lua 33 | continue-on-error: true 34 | run: | 35 | sudo apt-get install -y luarocks 36 | sudo luarocks install luacheck 37 | luacheck **/*.lua 38 | 39 | - name: Lint TeX Files 40 | continue-on-error: true 41 | run: | 42 | sudo apt-get install chktex 43 | chktex **/*.tex 44 | 45 | - name: Lint YAML Files 46 | continue-on-error: true 47 | run: | 48 | sudo apt-get update 49 | sudo apt-get install yamllint 50 | yamllint -f parsable **/*.yml 51 | 52 | semgrep-scan: 53 | runs-on: ubuntu-latest 54 | container: 55 | image: returntocorp/semgrep:latest 56 | steps: 57 | - name: Checkout repository 58 | uses: actions/checkout@v4 59 | 60 | - name: Semgrep Scan 61 | continue-on-error: true 62 | run: | 63 | semgrep --config "p/r2c" . 64 | 65 | 66 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf 2 | *.docx 3 | .DS_Store 4 | /.idea/ 5 | /.vscode/ 6 | **/.env 7 | /Chapter 4 Files/*.dumped.ndjson 8 | /Chapter 4 Files/exported/ 9 | 10 | #created files should be ignored: 11 | Chapter 3 Files/certs/ 12 | Chapter 3 Files/docker-compose-stack-live.yml 13 | Chapter 3 Files/logstash.edited.conf 14 | Chapter 3 Files/logstash_custom.conf 15 | LME/ 16 | files_for_windows.zip 17 | lme.conf 18 | **/venv/ 19 | /testing/tests/.env 20 | **/.pytest_cache/ 21 | **/__pycache__/ 22 | /testing/*.password.txt 23 | /testing/configure/azure_scripts/config.ps1 24 | /testing/configure.zip 25 | /testing/*.output.log 26 | /testing/tests/report.html 27 | testing/tests/assets/style.css 28 | .history/ 29 | **/get-docker.sh 30 | *.vim 31 | **.password.txt 32 | **.ip.txt 33 | **.swp 34 | *.vim* 35 | **/quadlet/output 36 | **/lme-environment.env 37 | **/env.sh 38 | testing/v2/installers/envirnment.sh 39 | docker/**/environment.sh 40 | **exporter.txt -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Welcome # 2 | 3 | We're so glad you're thinking about contributing to this open-source project! If you're unsure or hesitant to make a recommendation, just ask, submit the issue, or pull request. The worst that can happen is that you'll be politely asked to change something. We appreciate any sort of contribution(s), and don't want a wall of rules to stifle innovation. 
4 | 5 | Before contributing, we encourage you to read our CONTRIBUTING policy (you are here), our LICENSE, and our README, all of which are in this repository. 6 | 7 | ## Issues 8 | 9 | If you want to report a bug or request a new feature, the most direct method is to [create an issue](https://github.com/cisagov/development-guide/issues) in this repository. 10 | We recommend that you first search through existing issues (both open and closed) to check whether your particular issue has already been reported. 11 | 12 | If it has, then you might want to add a comment to the existing issue. 13 | 14 | If it hasn't, then please create a new one. 15 | 16 | Please follow the provided template and fill out all sections. We have both a `BUG` and a `FEATURE REQUEST` template. 17 | 18 | ## Branch naming conventions 19 | 20 | If you are planning to submit a pull request, please name your branch using the following naming convention: 21 | `<username>-<issue number>-<short description>` 22 | 23 | Example: 24 | `mreeve-22-filter-events` 25 | 26 | ## Pull Requests (PR) 27 | 28 | If you choose to submit a pull request, it will be required to pass various sanity checks in our continuous integration (CI) pipeline before we merge it. Your pull request may fail these checks, and that's OK. If you want, you can stop there and wait for us to make the necessary corrections to ensure your code passes the CI checks; you're more than within your rights. However, it helps our team greatly if you fix the issues found by our CI pipeline. 29 | 30 | Below are some loose requirements we'd like all PRs to follow. Our release process is documented in [Releases](RELEASES.md). 31 | 32 | ### Quality assurance and code reviews 33 | 34 | All PRs will be tested, vetted, and reviewed by our team before being merged with the main code base. All pull requests should target the `develop` branch. 35 | 36 | ### Steps to submit a PR 37 | - All PRs should request merges back into LME's `develop` branch. This will be viewable in the branch list on GitHub. You can also refer to our [release documentation](https://github.com/cisagov/LME/blob/main/RELEASES.md) for guidance. If the fix fits the requirements for a hotfix, the LME team will modify your PR as relevant. 38 | - If the PR corresponds to an issue we are already tracking on LME's public GitHub [project](https://github.com/orgs/cisagov/projects/68), please reference the PR in a comment on the issue, and we will update the issue. 39 | - If the PR does not have an issue, please create a new issue and name your branch according to the conventions [here](#branch-naming-conventions). Add a comment at the top of the pull request describing the PR and how it fits into LME's project/code. If the PR follows our other requirements listed here, we'll add it into our public project linked previously. 40 | - We'll work with you to mold it to our development goals/process, so your work can be merged into LME and your GitHub profile gets credit for the contributions. 41 | - Before merging, we request that all commits be squashed into one commit. This way your changes to the repository are tracked, but our `git log` history does not rapidly expand. 42 | - Thanks for wanting to submit and develop improvements for LME!! 43 | 44 | ## Public domain 45 | 46 | This project is in the public domain within the United States, and 47 | copyright and related rights in the work worldwide are waived through 48 | the [CC0 1.0 Universal public domain 49 | dedication](https://creativecommons.org/publicdomain/zero/1.0/).
50 | 51 | All contributions to this project will be released under the CC0 52 | dedication. By submitting a pull request, you are agreeing to comply 53 | with this waiver of copyright interest. 54 | -------------------------------------------------------------------------------- /RELEASES.md: -------------------------------------------------------------------------------- 1 | # Release Workflow: 2 | 3 | ## SEMVER Number Decisions 4 | 5 | Our versioning scheme for LME adheres to [SEMVER 2.0](https://semver.org/): X.Y.Z (Major.Minor.Patch). 6 | Version increments will generally adhere to the following guidelines: 7 | 1. Major SEMVER: Denotes a major release, e.g., a new capability or an LME architecture change. 8 | 2. Minor SEMVER: Denotes updates which are less than major but introduce noticeable changes. 9 | 3. Patch SEMVER: Fixes product-breaking bugs, vulnerabilities, or key documentation issues, but does not introduce new features or updates. 10 | 11 | ### Timelines 12 | 13 | Development lifecycle timelines will vary depending on project goals, tasking, community contributions, and vision. 14 | 15 | 16 | ## Branch Convention: 17 | We are using a GitHub flow denoted by: 18 | 19 | ![git-flow](/docs/imgs/git-flow.png) 20 | 21 | The team requests a brief description if you submit a fix for a current issue on the public project; that context will allow us to help determine whether it warrants inclusion. If the PR is well documented, following our processes in our [CONTRIBUTING.md](https://github.com/cisagov/LME/blob/main/CONTRIBUTING.md), it will most likely be worked into LME. We value inclusion and recognize the importance of the open-source community. 22 | 23 | ### Branch Naming Explained: 24 | We have 2 main branches whose names will stay constant: 25 | 1. The `main` branch tracks Major/Minor/Patch releases only, and is only updated with merges from the `develop` or a `hotfix` branch. Releases are tagged with the appropriate SEMVER based on their content: `vX.Y.Z`. 26 | 2. The `develop` branch is our working copy of the latest changes, and tracks all feature development. Feature branches are merged into `develop` as features are added, and when ready `develop` will merge into `main` as documented above. 27 | 28 | There are 2 other branch naming conventions that change based on the issue/update/content they add to the project. 29 | 1. A `hotfix` branch is created to "fix" or "patch" a critical issue in the `main` branch of the LME repository. Hotfixes are branched from `main` and merged into `develop`. This way `main` can get fixes, and `develop` will be synced with `main`. This process side-steps the normal `feature` -> `develop` -> commit release -> `main` workflow. Once the hotfix PR is finalized/approved and merged into `main`, we then execute a merge commit of `main` into `develop`. 30 | - It uses the convention: `hotfix-<username>-<issue number>-<short string>` 31 | - An example: `hotfix-cbaxley-222-fix-the-pipeline` 32 | 2. A feature branch is created from `develop` to add content for issues/work/updates/etc. 33 | - It uses the convention: `<username>-<issue number>-<short string>` 34 | - An example: `mreeve-22-filter-events` 35 | 36 | **NOTE:** Each branch name will have a short string to describe what it is solving, for example `create-new-container`. 37 | 38 | ### Post merge: 39 | Any branch other than develop/main should be deleted to preserve readability in GitHub's UI.
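As a minimal sketch of the branch lifecycle described above (the branch name reuses the `mreeve-22-filter-events` example; squashing can also be done with GitHub's "Squash and merge" button when the PR is merged):

```bash
# create a feature branch from develop, following <username>-<issue number>-<short string>
git checkout develop && git pull
git checkout -b mreeve-22-filter-events

# ...commit work, then push and open a PR targeting develop
git push -u origin mreeve-22-filter-events

# after the squashed PR is merged, delete the branch to keep the branch list readable
git push origin --delete mreeve-22-filter-events
git branch -D mreeve-22-filter-events   # -D because squash merges are not detected as merged
```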
40 | 41 | Commands to merge main back into develop: 42 | ```bash 43 | #in a previously cloned LME git repository: 44 | git pull 45 | git checkout develop 46 | git merge main 47 | #push up the new develop branch that is synced with main 48 | git push -f 49 | ``` 50 | ## Content: 51 | 52 | Each release generally notes the Additions, Changes, and Fixes addressed in the release, along with the contributors that provided code for it. Additionally, relevant builds will be attached to the release. Tagging the release will correspond with its originating branch's SEMVER number. 53 | 54 | ## Update Process: 55 | 56 | ### Code Freeze: 57 | Each code freeze will have an announced end date/time in accordance with our public [project](https://github.com/orgs/cisagov/projects/68). Any PRs with new content will need to be merged into `develop` by the announced time in order to be included in the release. 58 | 59 | ### Steps: 60 | 61 | 1. Goals/changes/updates to LME will be tracked in LME's public [project](https://github.com/orgs/cisagov/projects/68). These updates will be tracked by pull requests (and may be backed by corresponding issues for documentation purposes) into the `develop` branch. 62 | 2. As commits are pushed to the PRs set to pull into the `develop` branch, we will determine a time to cease development and mark a period of testing for `develop`, which will then be merged into `main`. 63 | 3. When it's determined that the features developed meet a goal or publish point, after waiting for feedback and proper testing, we will merge `develop` with a `vX.Y.Z` semver tag into the `main` branch. 64 | 65 | ### Caveats: 66 | Major or Minor SEMVER LME versions will only be pushed to `main` with testing and validation of the code to ensure stability and compatibility. However, new major changes will not always be backwards compatible. 67 | 68 | -------------------------------------------------------------------------------- /build/Readme.md: -------------------------------------------------------------------------------- 1 | # Generating the docs: 2 | 3 | This directory uses [pandoc](https://pandoc.org), a universal document converter, to build the markdown files into a pdf. Due to regulatory concerns we cannot release a pdf here directly, but you can utilize the following script to build the markdown docs into a pdf so you can use them offline if desired. 4 | 5 | In our testing we utilized the macOS package manager [homebrew](https://brew.sh/) to install our packages. 6 | 7 | ## Installing pandoc 8 | 9 | After you have homebrew, make sure to install mactex: 10 | ```bash 11 | brew install mactex 12 | ``` 13 | It's a huge download, but it makes compiling everything super easy. There is likely an equivalent on Linux, but we have not tested one. 14 | 15 | Finally, install pandoc: [link](https://pandoc.org/installing.html) 16 | ```bash 17 | brew install pandoc 18 | ``` 19 | 20 | ### Installing on other platforms 21 | Other operating systems and their respective LaTeX/pandoc packages have not been tested, nor will they be supported by LME. Since not every organization will have access to a macOS operating system but might wish to compile the docs anyway, please reach out and the team will attempt to help you compile the docs into a pdf. Any operating system with a LaTeX package and a pandoc executable should be able to accomplish the job. There are also many other ways to convert GitHub-flavored markdown to pdf if you search for them and want to compile using a different method than we've provided here.
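For teams that want to try a Linux build anyway, a minimal, untested sketch follows; the Debian/Ubuntu package names are assumptions and are not supported by LME, and `setup.tex` additionally uses `fvextra` and `emoji`, which may require extra TeX Live packages:

```bash
# untested sketch: pandoc plus a LuaLaTeX-capable TeX Live on Debian/Ubuntu
sudo apt-get update
sudo apt-get install -y pandoc texlive-luatex texlive-latex-extra texlive-fonts-extra

# confirm both tools are available before running the compile command in the next section
pandoc --version
lualatex --version
```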
22 | 23 | ## Compiling: 24 | This command below will compile the markdown docs on macos from the homebrew install pandoc/mactex packages: 25 | ```bash 26 | pandoc --from gfm --pdf-engine=lualatex -H ./build/setup.tex -V geometry:margin=1in --highlight-style pygments -o docs.pdf -V colorlinks=true -V linkcolor=blue --lua-filter=./build/emoji-filter.lua --lua-filter=./build/makerelativepaths.lua --lua-filter=./build/parse_breaks.lua --table-of-contents --number-sections --wrap=preserve --quiet -s $(cat ./build/includes.txt) 27 | ``` 28 | 29 | On a successful compilation it will output the `docs.pdf` file, a pdf of all the docs. There is a small bug where the `troubleshooting.md` table does not display as expected, so if you want the notes in the table offline, we suggest you record the information manually, OR submit a pull request that fixes this bug :smile:. 30 | 31 | ### Compiling .docx: 32 | .docx doesn't support emojis, so thats removed from the command 33 | ```bash 34 | pandoc --from gfm --pdf-engine=lualatex -H ./build/setup.tex -V geometry:margin=1in --highlight-style pygments -o docs.docx -V colorlinks=true -V linkcolor=blue --lua-filter=./build/makerelativepaths.lua --lua-filter=./build/parse_breaks.lua --table-of-contents --number-sections --wrap=preserve --quiet -s $(cat ./build/includes.txt) 35 | ``` 36 | 37 | -------------------------------------------------------------------------------- /build/includes.txt: -------------------------------------------------------------------------------- 1 | Readme.md 2 | ./docs/markdown/prerequisites.md 3 | ./docs/markdown/logging-guidance/cloud.md 4 | ./docs/markdown/logging-guidance/filtering.md 5 | ./docs/markdown/logging-guidance/retention.md 6 | ./docs/markdown/reference/dashboard-descriptions.md 7 | ./docs/markdown/reference/faq.md 8 | ./docs/markdown/reference/security-model.md 9 | ./docs/markdown/reference/troubleshooting.md 10 | ./docs/markdown/maintenance/backups.md 11 | ./docs/markdown/maintenance/certificates.md 12 | ./docs/markdown/maintenance/elastalert-rules.md 13 | ./docs/markdown/maintenance/Encryption_at_rest_option_for_users.md 14 | ./docs/markdown/maintenance/index-management.md 15 | ./docs/markdown/maintenance/upgrading.md 16 | ./docs/markdown/maintenance/volume-management.md 17 | ./docs/markdown/maintenance/vulnerability-scan-setup.md 18 | ./docs/markdown/maintenance/wazuh-configuration.md 19 | ./docs/markdown/agents/elastic-agent-management.md 20 | ./docs/markdown/agents/wazuh-active-response.md 21 | ./docs/markdown/agents/wazuh-agent-management.md 22 | ./docs/markdown/endpoint-tools/install-auditd.md 23 | ./docs/markdown/endpoint-tools/install-sysmon.md 24 | -------------------------------------------------------------------------------- /build/makerelativepaths.lua: -------------------------------------------------------------------------------- 1 | function Image (img) 2 | 3 | --remove invalid urls 4 | if string.find(img.src, "shields.io") then 5 | img.src = "" 6 | return img 7 | end 8 | 9 | --makes paths relative so that links resolve on pandoc compile 10 | img.src = pandoc.path.make_relative(img.src, '/') 11 | 12 | return img 13 | end 14 | 15 | -------------------------------------------------------------------------------- /build/parse_breaks.lua: -------------------------------------------------------------------------------- 1 | --- Transform a raw HTML element which contains only a `
` 2 | -- into a format-indepentent line break. 3 | function RawInline (el) 4 | if el.format:match '^html' and el.text:match '%' then 5 | return pandoc.LineBreak() 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /build/setup.tex: -------------------------------------------------------------------------------- 1 | % Contents of listings-setup.tex 2 | \usepackage{fvextra} 3 | \usepackage[utf8]{inputenc} 4 | 5 | \usepackage{lscape,longtable} 6 | 7 | %Note: be sure to put extra line between multi-line code, it will look bad otherwise! 8 | \DefineVerbatimEnvironment{Highlighting}{Verbatim}{breaklines, 9 | commandchars=\\\{\}, 10 | breaksymbolleft=, 11 | frame=single, 12 | } 13 | 14 | %if converting emojis, requires lualatex: 15 | \usepackage{emoji} 16 | 17 | 18 | \iffalse Comments below are for myself in future work: 19 | \fi 20 | -------------------------------------------------------------------------------- /config/containers.conf: -------------------------------------------------------------------------------- 1 | [secrets] 2 | driver = "shell" 3 | 4 | [secrets.opts] 5 | list = "ls /opt/lme/vault/" 6 | lookup = "ansible-vault view /opt/lme/vault/$SECRET_ID" 7 | store = "cat > /opt/lme/vault/$SECRET_ID; ansible-vault encrypt /opt/lme/vault/$SECRET_ID" 8 | delete = "rm /opt/lme/vault/$SECRET_ID" 9 | -------------------------------------------------------------------------------- /config/containers.txt: -------------------------------------------------------------------------------- 1 | docker.elastic.co/elasticsearch/elasticsearch:8.15.3 2 | docker.elastic.co/beats/elastic-agent:8.15.3 3 | docker.elastic.co/kibana/kibana:8.15.3 4 | docker.io/wazuh/wazuh-manager:4.9.1 5 | docker.io/jertel/elastalert2:2.20.0 6 | -------------------------------------------------------------------------------- /config/elastalert2/config.yaml: -------------------------------------------------------------------------------- 1 | run_every: 2 | minutes: 1 3 | 4 | buffer_time: 5 | minutes: 15 6 | 7 | writeback_index: elastalert_status 8 | 9 | alert_time_limit: 10 | days: 2 11 | 12 | es_host: lme-elasticsearch 13 | es_port: 9200 14 | use_ssl: true 15 | verify_certs: false 16 | 17 | #exists in the container 18 | rules_folder: /opt/elastalert/rules 19 | -------------------------------------------------------------------------------- /config/elastalert2/misc/smtp_auth.yml: -------------------------------------------------------------------------------- 1 | user: "loggingmadeeasy@gmail.com" 2 | password: "giyq caym zqiw chje" #this is your app password if using gmail 3 | -------------------------------------------------------------------------------- /config/elastalert2/rules/example-email-rule.yml: -------------------------------------------------------------------------------- 1 | name: EMAIL 2 | type: frequency 3 | index: wazuh-* 4 | num_events: 1 5 | timeframe: 6 | minutes: 1 7 | filter: 8 | - query: 9 | match_phrase: 10 | agent.ip: "10.1.0.4" 11 | alert: email 12 | alert_text: "ASDFASDF" 13 | alert_text_type: alert_text_only 14 | email: 15 | - "loggingmadeeasy@gmail.com" 16 | smtp_ssl: true 17 | smtp_port: 465 18 | smtp_host: "smtp.gmail.com" 19 | from_addr: "elastalert@elastalert.com" 20 | smtp_auth_file: /opt/elastalert/misc/smtp_auth.yml 21 | 22 | -------------------------------------------------------------------------------- /config/elastalert2/rules/windows_event_logs_cleared.yaml: -------------------------------------------------------------------------------- 
1 | name: Windows Event Logs Cleared 2 | 3 | # Type of rule 4 | type: any 5 | 6 | # Index pattern to search 7 | index: logs-* 8 | 9 | # Elasticsearch query in DSL format 10 | filter: 11 | - query: 12 | bool: 13 | must: 14 | - terms: 15 | event.action: ["audit-log-cleared", "Log clear"] 16 | - term: 17 | winlog.api: "wineventlog" 18 | must_not: 19 | - term: 20 | winlog.provider_name: "AD FS Auditing" 21 | 22 | # Alert when conditions are met 23 | alert: 24 | - "slack" 25 | 26 | # Slack alert details 27 | slack_webhook_url: "https://hooks.slack.com/services/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" 28 | slack_username_override: "Windows Security Alert" 29 | slack_msg_color: "danger" 30 | slack_emoji_override: ":rotating_light:" 31 | 32 | # Alert message format 33 | alert_text: | 34 | Windows Event Logs Cleared Detected! 35 | Host: {0} 36 | Event Action: {1} 37 | Winlog Provider Name: {2} 38 | Timestamp: {3} 39 | alert_text_args: 40 | - host.name 41 | - event.action 42 | - winlog.provider_name 43 | - "@timestamp" 44 | 45 | # Alert text only, without additional metadata 46 | alert_text_type: alert_text_only 47 | 48 | # Frequency for querying Elasticsearch 49 | realert: 50 | minutes: 5 51 | 52 | # Optional timestamp field to use for events 53 | timestamp_field: "@timestamp" -------------------------------------------------------------------------------- /config/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | cluster.name: "docker-cluster" 2 | network.host: 0.0.0.0 3 | path: 4 | repo: 5 | - /usr/share/elasticsearch 6 | - /usr/share/elasticsearch/backups 7 | 8 | -------------------------------------------------------------------------------- /config/example.env: -------------------------------------------------------------------------------- 1 | ###################################### 2 | ## MAKE SURE TO SET THE BELOW VALUE: # 3 | ###################################### 4 | #IP of your host machine 5 | IPVAR=127.0.0.1 6 | 7 | # ElasticSearch settings 8 | ######################## 9 | 10 | #TODO: this will be needed for scaling, not needed right now 11 | # the names of the OS nodes 12 | #ES_NODE1=es01 13 | # uncomment to create a cluster (more nodes can be added also) 14 | # !!! do not forget to also adjust the docker-compose.yml file !!! 15 | # ES_NODE2=es02 16 | 17 | # Local Kibana URL 18 | LOCAL_KBN_URL=https://127.0.0.1:5601 19 | # Local ES URL 20 | LOCAL_ES_URL=https://127.0.0.1:9200 21 | 22 | # Elastic settings 23 | ################# 24 | 25 | # Version of Elastic products 26 | STACK_VERSION=8.15.3 27 | # Testing pre-releases? 
Use the SNAPSHOT option below: 28 | # STACK_VERSION=8.11.0-SNAPSHOT 29 | # 30 | # Set the cluster name 31 | CLUSTER_NAME=LME 32 | 33 | #User info: 34 | #Username used by elastic service for admin, currently this is static 35 | ELASTIC_USERNAME=elastic 36 | # Password for the 'elastic' user (at least 6 characters) 37 | # ansible-vault: elastic 38 | #ELASTIC_PASSWORD=password1 39 | 40 | #Username used by kibana, currently this is static 41 | ELASTICSEARCH_USERNAME=kibana_system 42 | # Password for the 'kibana_system' user (at least 6 characters) 43 | # ansible-vault: kibana_system 44 | #KIBANA_PASSWORD=password1 45 | 46 | #Fleet: 47 | KIBANA_FLEET_USERNAME=elastic 48 | # ansible-vault: elastic 49 | #KIBANA_FLEET_PASSWORD=password1 50 | 51 | #Wazuh: 52 | # ansible-vault: wazuh 53 | #WAZUH_PASSWORD=MyP@ssw0rd1# 54 | INDEXER_USERNAME=elastic 55 | # ansible-vault: elastic 56 | #INDEXER_PASSWORD=password1 57 | API_USERNAME=wazuh-wui 58 | # ansible-vault: wazuh_api 59 | #API_PASSWORD=MyP@ssw0rd1# 60 | 61 | # Set to "basic" or "trial" to automatically start the 30-day trial 62 | LICENSE=basic 63 | 64 | #TODO: support changing these, right now they're static 65 | # Port to expose Elasticsearch HTTP API to the host 66 | ES_PORT=9200 67 | #ES_PORT=127.0.0.1:9200 68 | # Port to expose Kibana to the host 69 | KIBANA_PORT=5601 70 | # Port to expose Fleet to the host 71 | FLEET_PORT=8220 72 | 73 | # Increase or decrease based on the available host memory (in bytes) 74 | MEM_LIMIT=2073741824 75 | 76 | 77 | # Detection Settings: 78 | ################# 79 | #TODO: integrate fleet setup into postinstall ansible script 80 | # Bulk Enable Detection Rules by OS - change to "1" if you want to enable 81 | 82 | LinuxDR=0 83 | WindowsDR=0 84 | MacOSDR=0 85 | 86 | # Proxy Settings: 87 | # LEAVE BLANK IF NO PROXY! 88 | ################# 89 | 90 | # Standard certificate location for ubuntu 91 | #PROXY_CA_LOCATION=/etc/ssl/certs/ca-certificates.crt 92 | # Proxy Server URL 93 | #PROXY_URL= 94 | # IPs and host names you want the proxy to ignore. 
Typically want all private IP's and Docker network hostnames / IP's ignored 95 | # Example config: 96 | # 127.0.0.1,localhost,10.,172.16.,172.17.,192.168.,*.local,.local,169.254/16,lme-elasticsearch,lme-kibana,lme-fleet-server,lme-wazuh-manager 97 | #PROXY_IGNORE= 98 | #set these as well: 99 | #HTTP_PROXY= 100 | #HTTPS_PROXY= 101 | #NO_PROXY= 102 | -------------------------------------------------------------------------------- /config/kibana.yml: -------------------------------------------------------------------------------- 1 | xpack.encryptedSavedObjects.encryptionKey: "thirty-two-or-more-random-characters" 2 | server.host: "0.0.0.0" 3 | telemetry.enabled: "true" 4 | xpack.integration_assistant.enabled: false 5 | xpack.fleet.packages: 6 | - name: fleet_server 7 | version: latest 8 | - name: system 9 | version: latest 10 | xpack.fleet.agentPolicies: 11 | - name: Fleet-Server-Policy 12 | id: fleet-server-policy 13 | namespace: default 14 | package_policies: 15 | - name: fleet_server-1 16 | package: 17 | name: fleet_server 18 | 19 | -------------------------------------------------------------------------------- /config/setup/acct-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | CONFIG_DIR="/usr/share/elasticsearch/config" 5 | CERTS_DIR="${CONFIG_DIR}/certs" 6 | INSTANCES_PATH="${CONFIG_DIR}/setup/instances.yml" 7 | 8 | if [[ -z "${ELASTIC_PASSWORD:-}" || -z "${KIBANA_PASSWORD:-}" ]]; then 9 | echo "ERROR: ELASTIC_PASSWORD and/or KIBANA_PASSWORD are missing." 10 | exit 1 11 | fi 12 | 13 | if [ ! -f "${CERTS_DIR}/ACCOUNTS_CREATED" ]; then 14 | echo "Waiting for Elasticsearch availability"; 15 | until curl -s --cacert config/certs/ca/ca.crt https://lme-elasticsearch:9200 | grep -q "missing authentication credentials"; do echo "WAITING"; sleep 30; done; 16 | 17 | echo "Setting kibana_system password"; 18 | until curl -L -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://lme-elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 2; done; 19 | 20 | echo "All done!" | tee "${CERTS_DIR}/ACCOUNTS_CREATED" ; 21 | fi 22 | echo "Accounts kibana_system Created!" 23 | -------------------------------------------------------------------------------- /config/setup/init-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | if [[ -z "${ELASTIC_PASSWORD:-}" || -z "${KIBANA_PASSWORD:-}" ]]; then 5 | echo "ERROR: ELASTIC_PASSWORD and/or KIBANA_PASSWORD are missing." 6 | exit 1 7 | fi 8 | #echo $ELASTIC_PASSWORD 9 | #echo $KIBANA_PASSWORD 10 | 11 | CONFIG_DIR="/usr/share/elasticsearch/config" 12 | CERTS_DIR="${CONFIG_DIR}/certs" 13 | DATA_DIR="/usr/share/elasticsearch/data" 14 | INSTANCES_PATH="${CONFIG_DIR}/setup/instances.yml" 15 | 16 | if [ ! -f "${CERTS_DIR}/ca.zip" ]; then 17 | echo "Creating CA..." 18 | elasticsearch-certutil ca --silent --pem --out "${CERTS_DIR}/ca.zip" 19 | unzip -o "${CERTS_DIR}/ca.zip" -d "${CERTS_DIR}" 20 | fi 21 | 22 | if [ ! -f "${CERTS_DIR}/certs.zip" ]; then 23 | echo "Creating certificates..." 
24 | elasticsearch-certutil cert --silent --pem --in "${INSTANCES_PATH}" --out "${CERTS_DIR}/certs.zip" --ca-cert "${CERTS_DIR}/ca/ca.crt" --ca-key "${CERTS_DIR}/ca/ca.key" 25 | unzip -o "${CERTS_DIR}/certs.zip" -d "${CERTS_DIR}" 26 | cat "${CERTS_DIR}/elasticsearch/elasticsearch.crt" "${CERTS_DIR}/ca/ca.crt" > "${CERTS_DIR}/elasticsearch/elasticsearch.chain.pem" 27 | 28 | echo "Setting file permissions... certs" 29 | chown -R elasticsearch:elasticsearch "${CERTS_DIR}" 30 | find "${CERTS_DIR}" -type d -exec chmod 755 {} \; 31 | find "${CERTS_DIR}" -type f -exec chmod 644 {} \; 32 | 33 | echo "Setting file permissions... data" 34 | chown -R elasticsearch:elasticsearch "${DATA_DIR}" 35 | fi 36 | 37 | -------------------------------------------------------------------------------- /config/setup/instances.yml: -------------------------------------------------------------------------------- 1 | # Add host IP address / domain names as needed. 2 | 3 | instances: 4 | - name: "elasticsearch" 5 | dns: 6 | - "lme-elasticsearch" 7 | - "localhost" 8 | ip: 9 | - "127.0.0.1" 10 | 11 | - name: "kibana" 12 | dns: 13 | - "lme-kibana" 14 | - "localhost" 15 | ip: 16 | - "127.0.0.1" 17 | 18 | - name: "fleet-server" 19 | dns: 20 | - "lme-fleet-server" 21 | - "localhost" 22 | ip: 23 | - "127.0.0.1" 24 | 25 | - name: "wazuh-manager" 26 | dns: 27 | - "lme-wazuh-manager" 28 | - "localhost" 29 | ip: 30 | - "127.0.0.1" 31 | 32 | - name: "logstash" 33 | dns: 34 | - "logstash" 35 | - "localhost" 36 | ip: 37 | - "127.0.0.1" 38 | 39 | - name: "curator" 40 | dns: 41 | - "curator" 42 | - "localhost" 43 | ip: 44 | - "127.0.0.1" 45 | 46 | - name: "caddy" 47 | dns: 48 | - "lme-caddy" 49 | - "localhost" 50 | ip: 51 | - "127.0.0.1" 52 | -------------------------------------------------------------------------------- /dashboards/Readme.md: -------------------------------------------------------------------------------- 1 | # Folder for all the dashboards 2 | 3 | ## Wazuh Dashboards: 4 | For more info on these dashboards see wazuh's documentation: [LINK](https://documentation.wazuh.com/current/integrations-guide/elastic-stack/index.html) 5 | This is the dashboard URL: 6 | ```bash 7 | https://packages.wazuh.com/integrations/elastic/4.x-8.x/dashboards/wz-es-4.x-8.x-dashboards.ndjson 8 | ``` 9 | 10 | ## How to update dashboards 11 | Currently you need to run `ansible-playbook post_install_local.yml` to upload the current LME dashboards. 12 | 13 | ## Updating to new dashboards and removing old ones (Starting with 1.1.0) 14 | Browse to `Kibana->Stack Management` then select `Saved Objects`. 15 | On the Saved Objects page, you can filter by dashboards. 16 | 17 | Select the filter `Type` and select `dashboard`. 18 | 19 | * It is suggested that you export the dashboards first (readme below) so you have a backup. 20 | You can delete all of the dashboards before importing the new ones. 21 | 22 | 23 | ### Exporting dashboards: 24 | It is recommended that you export your dashboards before updating them, especially if you have customized them or created new ones. 25 | To export the dashboards use the `export_dashboards.py`. 26 | It is easiest to export them from the ubuntu machine where you have installed the ELK stack because the 27 | default port and hostname are in the script. You will need the user and password for elastic that were printed 28 | on your initial install. 
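If that password is no longer handy, one possible way to recover it is sketched below; this is an assumption based on `config/containers.conf` above, which stores secrets with ansible-vault under `/opt/lme/vault/`, and on the `# ansible-vault: elastic` hints in `config/example.env`, so the exact path and secret name may differ on your install:

```bash
# hypothetical: view the vaulted elastic password (prompts for the ansible-vault password unless one is configured)
sudo -i ansible-vault view /opt/lme/vault/elastic
```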
29 | 30 | ##### The files will be exported to `./exported` 31 | 32 | #### Running on Ubuntu 33 | 34 | ``` 35 | ./export_dashboards.py -u elastic -p YOURUNIQUEPASS 36 | ``` 37 | 38 | The modules should already be installed on Ubuntu, but if the script complains about missing modules: 39 | ``` 40 | pip install -r requirements.txt 41 | ``` 42 | 43 | #### Running on Windows 44 | You must have Python and the modules installed. (You can install Python 3 from the Microsoft Store.) Then install the requirements: 45 | ``` 46 | pip install -r requirements.txt 47 | ``` 48 | 49 | You will probably have to pass the host that you connect to for Kibana when running on Windows. 50 | ``` 51 | python .\export_dashboards.py -u elastic -p YOURUNIQUEPASS --host x.x.x.x 52 | ``` 53 | 54 | ## Customizing dashboards: 55 | When customizing dashboards, be sure the name of the file does not conflict with one tracked in git. In future iterations of LME, updates will overwrite any dashboard file that you have customized or named the same as an original file that appears in this directory. 56 | 57 | In addition, any other dashboards you want to save in git and track in this repository can be maintained safely (assuming the new files do not overlap in name with any original file in LME) by doing the following: 58 | 1. Creating your own local branch in this LME repo 59 | 2. Committing any changes 60 | 3. Pulling in changes from `main` into your local repo 61 | 62 | 63 | -------------------------------------------------------------------------------- /dashboards/requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | urllib3 -------------------------------------------------------------------------------- /docker/22.04/Dockerfile: -------------------------------------------------------------------------------- 1 | # Base stage with common dependencies 2 | FROM ubuntu:22.04 AS base 3 | 4 | ARG USER_ID=1001 5 | ARG GROUP_ID=1001 6 | 7 | ENV DEBIAN_FRONTEND=noninteractive \ 8 | LANG=en_US.UTF-8 \ 9 | LANGUAGE=en_US:en \ 10 | LC_ALL=en_US.UTF-8 11 | 12 | RUN apt-get update && apt-get install -y --no-install-recommends \ 13 | locales ca-certificates sudo sshpass openssh-client \ 14 | && locale-gen en_US.UTF-8 \ 15 | && update-locale LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 \ 16 | && groupadd -g $GROUP_ID lme-user \ 17 | && useradd -m -u $USER_ID -g lme-user --badnames lme-user \ 18 | && usermod -aG sudo lme-user \ 19 | && echo "lme-user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 20 | && apt-get clean \ 21 | && rm -rf /var/lib/apt/lists/* 22 | 23 | ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 24 | 25 | ENV BASE_DIR=/home/lme-user 26 | WORKDIR $BASE_DIR 27 | 28 | # Lme stage with full dependencies 29 | FROM base AS lme 30 | 31 | 32 | RUN apt-get update && apt-get install -y --no-install-recommends \ 33 | systemd systemd-sysv lsb-release python3 python3-venv python3-pip \ 34 | zip git curl wget cron pkg-config libcairo2-dev libdbus-1-dev \ 35 | distro-info libgirepository1.0-dev ansible python3-apt \ 36 | && apt-get clean \ 37 | && rm -rf /var/lib/apt/lists/* 38 | 39 | 40 | RUN cd /lib/systemd/system/sysinit.target.wants/ && \ 41 | ls | grep -v systemd-tmpfiles-setup | xargs rm -f $1 && \ 42 | rm -f /lib/systemd/system/multi-user.target.wants/* && \ 43 | rm -f /etc/systemd/system/*.wants/* && \ 44 | rm -f /lib/systemd/system/local-fs.target.wants/* && \ 45 | rm -f /lib/systemd/system/sockets.target.wants/*udev* && \ 46 | rm -f
/lib/systemd/system/sockets.target.wants/*initctl* && \ 47 | rm -f /lib/systemd/system/basic.target.wants/* && \ 48 | rm -f /lib/systemd/system/anaconda.target.wants/* && \ 49 | mkdir -p /etc/systemd/system/systemd-logind.service.d && \ 50 | echo -e "[Service]\nProtectHostname=no" > /etc/systemd/system/systemd-logind.service.d/override.conf 51 | 52 | 53 | COPY docker/22.04/lme-setup.service /etc/systemd/system/ 54 | 55 | RUN chmod 644 /etc/systemd/system/lme-setup.service 56 | 57 | COPY ansible/install_lme_local.yml /root/LME/ansible/ 58 | COPY config/example.env /root/LME/config/lme-environment.env 59 | 60 | RUN ansible-playbook /root/LME/ansible/install_lme_local.yml --tags base 61 | 62 | # Enable the service 63 | RUN systemctl enable lme-setup.service 64 | 65 | CMD ["/lib/systemd/systemd"] 66 | -------------------------------------------------------------------------------- /docker/22.04/check-lme-setup.ps1: -------------------------------------------------------------------------------- 1 | # Default timeout in minutes (30 minutes) 2 | $timeoutMinutes = 30 3 | $startTime = Get-Date 4 | 5 | # Function to check if timeout has been reached 6 | function Test-Timeout { 7 | $currentTime = Get-Date 8 | $elapsedTime = ($currentTime - $startTime).TotalMinutes 9 | if ($elapsedTime -gt $timeoutMinutes) { 10 | Write-Host "ERROR: Setup timed out after $timeoutMinutes minutes" 11 | exit 1 12 | } 13 | } 14 | 15 | Write-Host "Starting LME setup check..." 16 | 17 | # Main loop 18 | while ($true) { 19 | # Check if the timeout has been reached 20 | Test-Timeout 21 | 22 | # Get the logs and check for completion 23 | $logs = docker compose exec lme journalctl -u lme-setup -o cat --no-hostname 24 | 25 | # Check for successful completion 26 | if ($logs -match "First-time initialization complete") { 27 | Write-Host "SUCCESS: LME setup completed successfully" 28 | exit 0 29 | } 30 | 31 | # Check for failure indicators 32 | if ($logs -match "failed=1") { 33 | Write-Host "ERROR: Ansible playbook reported failures" 34 | exit 1 35 | } 36 | 37 | # Track progress through the playbooks 38 | $recapCount = ($logs | Select-String "PLAY RECAP" -AllMatches).Matches.Count 39 | if ($recapCount -gt 0) { 40 | Write-Host "INFO: Detected $recapCount of 2 playbook completions..." 41 | } 42 | 43 | # Wait before next check (60 seconds) 44 | Start-Sleep -Seconds 60 45 | } -------------------------------------------------------------------------------- /docker/22.04/check-lme-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Default timeout in seconds (30 minutes) 4 | TIMEOUT=1800 5 | START_TIME=$(date +%s) 6 | 7 | # Function to check if timeout has been reached 8 | check_timeout() { 9 | current_time=$(date +%s) 10 | elapsed_time=$((current_time - START_TIME)) 11 | if [ $elapsed_time -gt $TIMEOUT ]; then 12 | echo "ERROR: Setup timed out after ${TIMEOUT} seconds" 13 | exit 1 14 | fi 15 | } 16 | 17 | echo "Starting LME setup check..." 
18 | 19 | # Main loop 20 | while true; do 21 | # Check if the timeout has been reached 22 | check_timeout 23 | 24 | # Get the logs and check for completion 25 | logs=$(docker compose exec lme journalctl -u lme-setup -o cat --no-hostname) 26 | 27 | # Check for successful completion 28 | if echo "$logs" | grep -q "First-time initialization complete"; then 29 | echo "SUCCESS: LME setup completed successfully" 30 | exit 0 31 | fi 32 | 33 | # Check for failure indicators 34 | if echo "$logs" | grep -q "failed=1"; then 35 | echo "ERROR: Ansible playbook reported failures" 36 | exit 1 37 | fi 38 | 39 | # Track progress through the playbooks 40 | recap_count=$(echo "$logs" | grep -c "PLAY RECAP") 41 | if [ "$recap_count" -gt 0 ]; then 42 | echo "INFO: Detected ${recap_count} of 2 playbook completions..." 43 | fi 44 | 45 | # Wait before next check (60 seconds) 46 | sleep 60 47 | done -------------------------------------------------------------------------------- /docker/22.04/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | lme: 3 | build: 4 | context: ../../ 5 | dockerfile: docker/22.04/Dockerfile 6 | target: lme 7 | args: 8 | USER_ID: "${HOST_UID:-1001}" 9 | GROUP_ID: "${HOST_GID:-1001}" 10 | container_name: lme 11 | working_dir: /root 12 | volumes: 13 | - ../../../LME:/root/LME 14 | - /sys/fs/cgroup:/sys/fs/cgroup:rslave 15 | - /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw 16 | cap_add: 17 | - SYS_ADMIN 18 | security_opt: 19 | - seccomp:unconfined 20 | privileged: true 21 | user: root 22 | tmpfs: 23 | - /tmp 24 | - /run 25 | - /run/lock 26 | environment: 27 | - PODMAN_IGNORE_CGROUPSV1_WARNING=1 28 | - LANG=en_US.UTF-8 29 | - LANGUAGE=en_US:en 30 | - LC_ALL=en_US.UTF-8 31 | - container=docker 32 | - HOST_IP=${HOST_IP} 33 | command: ["/lib/systemd/systemd", "--system"] 34 | ports: 35 | - "5601:5601" 36 | - "443:443" 37 | - "8220:8220" 38 | - "9200:9200" -------------------------------------------------------------------------------- /docker/22.04/environment_example.sh: -------------------------------------------------------------------------------- 1 | #export HOST_IP=192.168.1.194 2 | -------------------------------------------------------------------------------- /docker/22.04/lme-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | INIT_FLAG="/opt/.lme_initialized" 4 | 5 | if [ ! -f "$INIT_FLAG" ]; then 6 | echo "Running first-time LME initialization..." 7 | rm -rf /opt/lme/lme-environment.env 8 | 9 | # Copy environment file if it doesn't exist 10 | cp -n /root/LME/config/example.env /root/LME/config/lme-environment.env 11 | 12 | . /root/LME/docker/22.04/environment.sh 13 | 14 | # Update IPVAR in the environment file with the passed HOST_IP 15 | if [ ! 
-z "$HOST_IP" ]; then 16 | echo "Using HOST_IP: $HOST_IP" 17 | sed -i "s/IPVAR=.*/IPVAR=$HOST_IP/" /root/LME/config/lme-environment.env 18 | export IPVAR=$HOST_IP 19 | else 20 | echo "Warning: HOST_IP not set, using default IPVAR value" 21 | fi 22 | cp -n /root/LME/config/lme-environment.env /opt/lme/lme-environment.env 23 | 24 | # Run initial setup with timing 25 | cd /root/LME/ansible/ 26 | echo "Starting system setup at $(date)" 27 | time ansible-playbook install_lme_local.yml --tags system 28 | echo "Starting post-install setup at $(date)" 29 | time ansible-playbook post_install_local.yml -e "IPVAR=$IPVAR" -e "debug_mode=true" 30 | echo "Setup completed at $(date)" 31 | 32 | # Create flag file to indicate initialization is complete 33 | touch "$INIT_FLAG" 34 | echo "First-time initialization complete." 35 | else 36 | echo "LME already initialized, skipping first-time setup." 37 | systemctl disable lme-setup.service 38 | systemctl daemon-reload 39 | fi -------------------------------------------------------------------------------- /docker/22.04/lme-setup.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LME Setup Service 3 | After=nix-daemon.service 4 | Requires=nix-daemon.service 5 | 6 | [Service] 7 | Type=oneshot 8 | WorkingDirectory=/root/LME 9 | ExecStart=/bin/bash -c /root/LME/docker/22.04/lme-init.sh 10 | RemainAfterExit=yes 11 | StandardOutput=journal 12 | StandardError=journal 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /docker/24.04/Dockerfile: -------------------------------------------------------------------------------- 1 | # Base stage with common dependencies 2 | FROM ubuntu:24.04 AS base 3 | 4 | ARG USER_ID=1002 5 | ARG GROUP_ID=1002 6 | 7 | ENV DEBIAN_FRONTEND=noninteractive \ 8 | LANG=en_US.UTF-8 \ 9 | LANGUAGE=en_US:en \ 10 | LC_ALL=en_US.UTF-8 11 | 12 | RUN apt-get update && apt-get install -y --no-install-recommends \ 13 | locales ca-certificates sudo sshpass openssh-client \ 14 | && locale-gen en_US.UTF-8 \ 15 | && update-locale LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 \ 16 | && while getent group $GROUP_ID > /dev/null 2>&1; do GROUP_ID=$((GROUP_ID + 1)); done \ 17 | && while getent passwd $USER_ID > /dev/null 2>&1; do USER_ID=$((USER_ID + 1)); done \ 18 | && groupadd -g $GROUP_ID lme-user \ 19 | && useradd -m -u $USER_ID -g lme-user lme-user \ 20 | && usermod -aG sudo lme-user \ 21 | && echo "lme-user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \ 22 | && apt-get clean \ 23 | && rm -rf /var/lib/apt/lists/* 24 | 25 | ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 26 | 27 | ENV BASE_DIR=/home/lme-user 28 | WORKDIR $BASE_DIR 29 | 30 | # Lme stage with full dependencies 31 | FROM base AS lme 32 | 33 | 34 | RUN apt-get update && apt-get install -y --no-install-recommends \ 35 | systemd systemd-sysv lsb-release python3 python3-venv python3-pip \ 36 | zip git curl wget cron pkg-config libcairo2-dev libdbus-1-dev \ 37 | distro-info libgirepository1.0-dev ansible python3-apt \ 38 | && apt-get clean \ 39 | && rm -rf /var/lib/apt/lists/* 40 | 41 | 42 | RUN cd /lib/systemd/system/sysinit.target.wants/ && \ 43 | ls | grep -v systemd-tmpfiles-setup | xargs rm -f $1 && \ 44 | rm -f /lib/systemd/system/multi-user.target.wants/* && \ 45 | rm -f /etc/systemd/system/*.wants/* && \ 46 | rm -f /lib/systemd/system/local-fs.target.wants/* && \ 47 | rm -f /lib/systemd/system/sockets.target.wants/*udev* && \ 48 | rm -f 
/lib/systemd/system/sockets.target.wants/*initctl* && \ 49 | rm -f /lib/systemd/system/basic.target.wants/* && \ 50 | rm -f /lib/systemd/system/anaconda.target.wants/* && \ 51 | mkdir -p /etc/systemd/system/systemd-logind.service.d && \ 52 | echo -e "[Service]\nProtectHostname=no" > /etc/systemd/system/systemd-logind.service.d/override.conf 53 | 54 | 55 | COPY docker/24.04/lme-setup.service /etc/systemd/system/ 56 | 57 | RUN chmod 644 /etc/systemd/system/lme-setup.service 58 | 59 | COPY ansible/install_lme_local.yml /root/LME/ansible/ 60 | COPY config/example.env /root/LME/config/lme-environment.env 61 | RUN ansible-playbook /root/LME/ansible/install_lme_local.yml --tags base 62 | 63 | # Enable the service 64 | RUN systemctl enable lme-setup.service 65 | 66 | CMD ["/lib/systemd/systemd"] 67 | -------------------------------------------------------------------------------- /docker/24.04/check-lme-setup.ps1: -------------------------------------------------------------------------------- 1 | # Default timeout in minutes (30 minutes) 2 | $timeoutMinutes = 30 3 | $startTime = Get-Date 4 | 5 | # Function to check if timeout has been reached 6 | function Test-Timeout { 7 | $currentTime = Get-Date 8 | $elapsedTime = ($currentTime - $startTime).TotalMinutes 9 | if ($elapsedTime -gt $timeoutMinutes) { 10 | Write-Host "ERROR: Setup timed out after $timeoutMinutes minutes" 11 | exit 1 12 | } 13 | } 14 | 15 | Write-Host "Starting LME setup check..." 16 | 17 | # Main loop 18 | while ($true) { 19 | # Check if the timeout has been reached 20 | Test-Timeout 21 | 22 | # Get the logs and check for completion 23 | $logs = docker compose exec lme journalctl -u lme-setup -o cat --no-hostname 24 | 25 | # Check for successful completion 26 | if ($logs -match "First-time initialization complete") { 27 | Write-Host "SUCCESS: LME setup completed successfully" 28 | exit 0 29 | } 30 | 31 | # Check for failure indicators 32 | if ($logs -match "failed=1") { 33 | Write-Host "ERROR: Ansible playbook reported failures" 34 | exit 1 35 | } 36 | 37 | # Track progress through the playbooks 38 | $recapCount = ($logs | Select-String "PLAY RECAP" -AllMatches).Matches.Count 39 | if ($recapCount -gt 0) { 40 | Write-Host "INFO: Detected $recapCount of 2 playbook completions..." 41 | } 42 | 43 | # Wait before next check (60 seconds) 44 | Start-Sleep -Seconds 60 45 | } -------------------------------------------------------------------------------- /docker/24.04/check-lme-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Default timeout in seconds (30 minutes) 4 | TIMEOUT=1800 5 | START_TIME=$(date +%s) 6 | 7 | # Function to check if timeout has been reached 8 | check_timeout() { 9 | current_time=$(date +%s) 10 | elapsed_time=$((current_time - START_TIME)) 11 | if [ $elapsed_time -gt $TIMEOUT ]; then 12 | echo "ERROR: Setup timed out after ${TIMEOUT} seconds" 13 | exit 1 14 | fi 15 | } 16 | 17 | echo "Starting LME setup check..." 
18 | 19 | # Main loop 20 | while true; do 21 | # Check if the timeout has been reached 22 | check_timeout 23 | 24 | # Get the logs and check for completion 25 | logs=$(docker compose exec lme journalctl -u lme-setup -o cat --no-hostname) 26 | 27 | # Check for successful completion 28 | if echo "$logs" | grep -q "First-time initialization complete"; then 29 | echo "SUCCESS: LME setup completed successfully" 30 | exit 0 31 | fi 32 | 33 | # Check for failure indicators 34 | if echo "$logs" | grep -q "failed=1"; then 35 | echo "ERROR: Ansible playbook reported failures" 36 | exit 1 37 | fi 38 | 39 | # Track progress through the playbooks 40 | recap_count=$(echo "$logs" | grep -c "PLAY RECAP") 41 | if [ "$recap_count" -gt 0 ]; then 42 | echo "INFO: Detected ${recap_count} of 2 playbook completions..." 43 | fi 44 | 45 | # Wait before next check (60 seconds) 46 | sleep 60 47 | done -------------------------------------------------------------------------------- /docker/24.04/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | lme: 3 | build: 4 | context: ../../ 5 | dockerfile: docker/24.04/Dockerfile 6 | target: lme 7 | args: 8 | USER_ID: "${HOST_UID:-1001}" 9 | GROUP_ID: "${HOST_GID:-1001}" 10 | container_name: lme 11 | working_dir: /root 12 | volumes: 13 | - ../../../LME:/root/LME 14 | - /sys/fs/cgroup:/sys/fs/cgroup:rslave 15 | - /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw 16 | cap_add: 17 | - SYS_ADMIN 18 | security_opt: 19 | - seccomp:unconfined 20 | privileged: true 21 | user: root 22 | tmpfs: 23 | - /tmp 24 | - /run 25 | - /run/lock 26 | environment: 27 | - PODMAN_IGNORE_CGROUPSV1_WARNING=1 28 | - LANG=en_US.UTF-8 29 | - LANGUAGE=en_US:en 30 | - LC_ALL=en_US.UTF-8 31 | - container=docker 32 | - HOST_IP=${HOST_IP} 33 | command: ["/lib/systemd/systemd", "--system"] 34 | ports: 35 | - "5601:5601" 36 | - "443:443" 37 | - "8220:8220" 38 | - "9200:9200" -------------------------------------------------------------------------------- /docker/24.04/environment.sh: -------------------------------------------------------------------------------- 1 | #export HOST_IP=192.168.50.205 2 | -------------------------------------------------------------------------------- /docker/24.04/environment_example.sh: -------------------------------------------------------------------------------- 1 | #export HOST_IP=192.168.1.194 2 | -------------------------------------------------------------------------------- /docker/24.04/lme-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | INIT_FLAG="/opt/.lme_initialized" 4 | 5 | if [ ! -f "$INIT_FLAG" ]; then 6 | echo "Running first-time LME initialization..." 7 | rm -rf /opt/lme/lme-environment.env 8 | 9 | # Copy environment file if it doesn't exist 10 | cp -n /root/LME/config/example.env /root/LME/config/lme-environment.env 11 | 12 | . /root/LME/docker/24.04/environment.sh 13 | 14 | # Update IPVAR in the environment file with the passed HOST_IP 15 | if [ ! 
-z "$HOST_IP" ]; then 16 | echo "Using HOST_IP: $HOST_IP" 17 | sed -i "s/IPVAR=.*/IPVAR=$HOST_IP/" /root/LME/config/lme-environment.env 18 | export IPVAR=$HOST_IP 19 | else 20 | echo "Warning: HOST_IP not set, using default IPVAR value" 21 | fi 22 | cp -n /root/LME/config/lme-environment.env /opt/lme/lme-environment.env 23 | 24 | # Run initial setup with timing 25 | cd /root/LME/ansible/ 26 | echo "Starting system setup at $(date)" 27 | time ansible-playbook install_lme_local.yml --tags system 28 | echo "Starting post-install setup at $(date)" 29 | time ansible-playbook post_install_local.yml -e "IPVAR=$IPVAR" -e "debug_mode=true" 30 | echo "Setup completed at $(date)" 31 | 32 | # Create flag file to indicate initialization is complete 33 | touch "$INIT_FLAG" 34 | echo "First-time initialization complete." 35 | else 36 | echo "LME already initialized, skipping first-time setup." 37 | systemctl disable lme-setup.service 38 | systemctl daemon-reload 39 | fi -------------------------------------------------------------------------------- /docker/24.04/lme-setup.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LME Setup Service 3 | After=nix-daemon.service 4 | Requires=nix-daemon.service 5 | 6 | [Service] 7 | Type=oneshot 8 | WorkingDirectory=/root/LME 9 | ExecStart=/bin/bash -c /root/LME/docker/24.04/lme-init.sh 10 | RemainAfterExit=yes 11 | StandardOutput=journal 12 | StandardError=journal 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | # LME Docker Setup 2 | Download and unzip the latest release of LME from the [releases page](https://github.com/cisagov/lme/releases) into your home directory. 3 | 4 | This guide is for setting up the LME container using Docker. It is NOT persistent, which means you will need to run, and rebuild, the container again after stopping it. 5 | It is for testing purposes only, so you can easily install and examine the parts of the LME stack. 6 | 7 | All commands in this guide should be run from the `LME/docker` directory of the repository. 8 | You can choose either the 22.04 or 24.04 directories to build the container. 9 | 10 | 11 | ## Prerequisites 12 | 13 | - A current version of Docker which should include Docker compose (there is an installer script for ubuntu in the `docker` directory) 14 | - At least 20GB of RAM 15 | - 100GB of disk space preferred 16 | 17 | Note: We have installed Docker desktop on Windows and Linux and have been able to build and run the container. 18 | 19 | ### Special instructions for Windows running Linux 20 | If running Linux on a hypervisor or virtual machine, you may need to modify the GRUB configuration in your VM: 21 | 22 | 1. Add the following to the `GRUB_CMDLINE_LINUX` line in `/etc/default/grub`: 23 | ```bash 24 | GRUB_CMDLINE_LINUX="systemd.unified_cgroup_hierarchy=0 cgroup_enable=memory swapaccount=1" 25 | ``` 26 | 27 | 2. Update GRUB and reboot: 28 | ```bash 29 | sudo update-grub 30 | sudo reboot 31 | ``` 32 | 33 | ## Building and Running LME 34 | 35 | 1. Cd to the version you want to run (eg `cd LME/docker/22.04`) and build the container (this may take several minutes): 36 | ```bash 37 | docker compose build 38 | ``` 39 | 2. Copy the environment_example.sh file to environment.sh and set the IP address of the host machine that you will access the LME UI from. 
40 | 41 | Set this variable to the IP address of the host machine. 42 | ```bash 43 | export HOST_IP=192.168.50.205 44 | ``` 45 | 3. Start the container: 46 | ```bash 47 | docker compose up -d 48 | ``` 49 | 50 | ## Monitoring Setup Progress 51 | 52 | The initial LME setup can take 15-30 minutes to complete. Here are ways to monitor the progress: 53 | 54 | ### View Setup Logs 55 | Watch the detailed setup logs and wait for them to report that the setup is complete: 56 | ```bash 57 | docker compose exec lme journalctl -u lme-setup -f -o cat --no-hostname 58 | ``` 59 | When the setup is complete, you will see something like this: 60 | ```bash 61 | Setup completed at Tue Feb 11 12:42:30 PM UTC 2025 62 | First-time initialization complete. 63 | Finished LME Setup Service. 64 | ``` 65 | 66 | ### Check Setup Status 67 | This will check the status of the setup and report whether it is complete, but it does not show incremental progress. 68 | Check the current setup status: 69 | 70 | #### Linux: 71 | ```bash 72 | ./check-lme-setup.sh 73 | ``` 74 | 75 | #### Windows PowerShell: 76 | 1. Run PowerShell as Administrator 77 | 2. Enable script execution (one-time setup): 78 | ```powershell 79 | # For current user only (recommended) 80 | Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser 81 | ``` 82 | 3. Run the status check: 83 | ```powershell 84 | .\check-lme-setup.ps1 85 | ``` 86 | 87 | ## Accessing the Container 88 | 89 | ### List Running Containers 90 | These commands must be run from the directory of the version you are using. 91 | View all running containers: 92 | ```bash 93 | docker compose ps 94 | ``` 95 | 96 | ### Access Container Shell 97 | Enter the running container: 98 | ```bash 99 | docker compose exec lme bash 100 | ``` 101 | This will give you a root shell inside the container, from which you can follow the instructions in the main README about how 102 | to check the LME service containers running within it. In the [main README](https://github.com/cisagov/lme?tab=readme-ov-file#table-of-contents) 103 | locate the "Post installation steps" section and the sections that follow to manage and access the system.
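For a quick sanity check from inside that shell, you can list the nested LME service containers; this is a minimal sketch assuming the stack is managed by root's Podman, as described in the main README:
```bash
# Run inside the lme container's root shell: list the nested LME service containers
podman ps --format "{{.Names}}: {{.Status}}"
```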
104 | 105 | ### Getting passwords for the users 106 | The passwords for the users are accessed by running the following command: 107 | ```bash 108 | docker compose exec lme bash -c "/root/LME/scripts/extract_secrets.sh -p" 109 | ``` 110 | The user and password for the LME UI are: 111 | ```bash 112 | elastic=password_printed_in_the_last_command 113 | # user: elastic 114 | # password: password_printed_in_the_last_command 115 | ``` 116 | 117 | ### Access the LME UI 118 | The LME UI is available at https://localhost 119 | 120 | ### Stop the Container 121 | When you're done: 122 | ```bash 123 | docker compose down 124 | ``` 125 | 126 | ## Troubleshooting 127 | 128 | - If the container fails to start, check the logs: 129 | ```bash 130 | docker compose logs lme 131 | ``` 132 | 133 | - If you need to rebuild from scratch: 134 | ```bash 135 | docker compose down -v 136 | docker compose build --no-cache 137 | docker compose up -d 138 | ``` 139 | 140 | -------------------------------------------------------------------------------- /docker/install_latest_docker_in_ubuntu.sh: -------------------------------------------------------------------------------- 1 | # Remove old versions 2 | sudo apt-get remove docker docker-engine docker.io containerd runc 3 | 4 | # Add Docker's official GPG key 5 | sudo apt-get update 6 | sudo apt-get install ca-certificates curl gnupg 7 | sudo install -m 0755 -d /etc/apt/keyrings 8 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 9 | sudo chmod a+r /etc/apt/keyrings/docker.gpg 10 | 11 | # Add Docker repository 12 | echo \ 13 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 14 | "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ 15 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 16 | 17 | # Install Docker 18 | sudo apt-get update 19 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -------------------------------------------------------------------------------- /docs/imgs/AdjustForwardedEventsLogSize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/AdjustForwardedEventsLogSize.png -------------------------------------------------------------------------------- /docs/imgs/OverviewDiagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/OverviewDiagram.png -------------------------------------------------------------------------------- /docs/imgs/add-exceptions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/add-exceptions.png -------------------------------------------------------------------------------- /docs/imgs/alert-enable-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/alert-enable-menu.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/policy_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/policy_1.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/policy_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/policy_2.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/policy_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/policy_3.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/policy_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/policy_4.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/policy_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/policy_5.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/policy_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/policy_6.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/repository_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/repository_1.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/repository_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/repository_2.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/repository_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/repository_3.png -------------------------------------------------------------------------------- /docs/imgs/backup_pics/snapshot_and_restore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/backup_pics/snapshot_and_restore.png -------------------------------------------------------------------------------- /docs/imgs/chapter_overview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/chapter_overview.jpg -------------------------------------------------------------------------------- /docs/imgs/cisa.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/cisa.png -------------------------------------------------------------------------------- /docs/imgs/close-index.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/close-index.png -------------------------------------------------------------------------------- /docs/imgs/createindex.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/createindex.jpg -------------------------------------------------------------------------------- /docs/imgs/dashboard.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard.jpg -------------------------------------------------------------------------------- /docs/imgs/dashboard/app_password.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/app_password.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/dataview-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/dataview-create.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/delete-import-dashboards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/delete-import-dashboards.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/discover-pivot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/discover-pivot-1.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/discover-pivot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/discover-pivot-2.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/elastalert-dataview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/elastalert-dataview.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/lme-dashboards-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/lme-dashboards-list.png -------------------------------------------------------------------------------- /docs/imgs/dashboard/wazuh-dashboards-list.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dashboard/wazuh-dashboards-list.png -------------------------------------------------------------------------------- /docs/imgs/default-index-pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/default-index-pattern.png -------------------------------------------------------------------------------- /docs/imgs/default-winlogbeat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/default-winlogbeat.png -------------------------------------------------------------------------------- /docs/imgs/delete-indices.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/delete-indices.jpg -------------------------------------------------------------------------------- /docs/imgs/delete-originals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/delete-originals.png -------------------------------------------------------------------------------- /docs/imgs/dev-tools.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/dev-tools.jpg -------------------------------------------------------------------------------- /docs/imgs/discover_tab.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/discover_tab.jpg -------------------------------------------------------------------------------- /docs/imgs/duplicate-indices.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/duplicate-indices.jpg -------------------------------------------------------------------------------- /docs/imgs/edit-update-script.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/edit-update-script.png -------------------------------------------------------------------------------- /docs/imgs/elkstack.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/elkstack.jpg -------------------------------------------------------------------------------- /docs/imgs/error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/error.png -------------------------------------------------------------------------------- /docs/imgs/event_viewer_prompt.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/event_viewer_prompt.png -------------------------------------------------------------------------------- /docs/imgs/eventforwarding_overview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/eventforwarding_overview.jpg -------------------------------------------------------------------------------- /docs/imgs/eventviewer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/eventviewer.jpg -------------------------------------------------------------------------------- /docs/imgs/example-exception.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/example-exception.png -------------------------------------------------------------------------------- /docs/imgs/exceptions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/exceptions.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/deletion-enable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/deletion-enable.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/filebeat-selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/filebeat-selection.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/filebeat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/filebeat.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/ilm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/ilm.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/logstash-writer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/logstash-writer.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/roles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/roles.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/stack-management.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/stack-management.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/update-retention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/update-retention.png -------------------------------------------------------------------------------- /docs/imgs/extra_beats_pics/update-role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/extra_beats_pics/update-role.png -------------------------------------------------------------------------------- /docs/imgs/firstload.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/firstload.jpg -------------------------------------------------------------------------------- /docs/imgs/fleetservermissingurl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/fleetservermissingurl.png -------------------------------------------------------------------------------- /docs/imgs/git-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/git-flow.png -------------------------------------------------------------------------------- /docs/imgs/gpo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/aduc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/aduc.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/create_new_object.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/create_new_object.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/gpmc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/gpmc.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/import_done.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/import_done.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/import_new_object.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/import_new_object.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/link_an_ou.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/link_an_ou.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/name_new_object.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/name_new_object.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/new_ou.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/new_ou.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/optional_features.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/optional_features.png -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/rsat_gpmc_optional_features.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/rsat_gpmc_optional_features.png -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/select_backup.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/select_backup.jpg -------------------------------------------------------------------------------- /docs/imgs/gpo_pics/select_gpo_link.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpo_pics/select_gpo_link.jpg -------------------------------------------------------------------------------- /docs/imgs/gpoedit.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/gpoedit.jpg -------------------------------------------------------------------------------- /docs/imgs/healthcheckstatus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/healthcheckstatus.jpg -------------------------------------------------------------------------------- /docs/imgs/import.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/import.jpg -------------------------------------------------------------------------------- /docs/imgs/import.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/import.png -------------------------------------------------------------------------------- /docs/imgs/import1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/import1.png -------------------------------------------------------------------------------- /docs/imgs/import2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/import2.png -------------------------------------------------------------------------------- /docs/imgs/index-patterns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/index-patterns.png -------------------------------------------------------------------------------- /docs/imgs/index-selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/index-selection.png -------------------------------------------------------------------------------- /docs/imgs/insecure-powershell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/insecure-powershell.png -------------------------------------------------------------------------------- /docs/imgs/lme-architecture-v2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/lme-architecture-v2.jpg -------------------------------------------------------------------------------- /docs/imgs/lme-architecture-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/lme-architecture-v2.png -------------------------------------------------------------------------------- /docs/imgs/lme-cloud.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/lme-cloud.jpg -------------------------------------------------------------------------------- /docs/imgs/lme-image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/lme-image.png -------------------------------------------------------------------------------- /docs/imgs/logistics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/logistics.png -------------------------------------------------------------------------------- /docs/imgs/nav-bar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/nav-bar.png 
-------------------------------------------------------------------------------- /docs/imgs/re-index-script.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/re-index-script.jpg -------------------------------------------------------------------------------- /docs/imgs/restore-details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/restore-details.png -------------------------------------------------------------------------------- /docs/imgs/restore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/restore.png -------------------------------------------------------------------------------- /docs/imgs/retention_pics/retention_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/retention_pics/retention_1.png -------------------------------------------------------------------------------- /docs/imgs/rules_error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/rules_error.png -------------------------------------------------------------------------------- /docs/imgs/select-rule.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/select-rule.png -------------------------------------------------------------------------------- /docs/imgs/siem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem.png -------------------------------------------------------------------------------- /docs/imgs/siem1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem1.png -------------------------------------------------------------------------------- /docs/imgs/siem2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem2.png -------------------------------------------------------------------------------- /docs/imgs/siem3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem3.png -------------------------------------------------------------------------------- /docs/imgs/siem4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem4.png -------------------------------------------------------------------------------- /docs/imgs/siem5.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem5.png -------------------------------------------------------------------------------- /docs/imgs/siem6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/siem6.png -------------------------------------------------------------------------------- /docs/imgs/snap-restore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/snap-restore.png -------------------------------------------------------------------------------- /docs/imgs/stack-management.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/stack-management.jpg -------------------------------------------------------------------------------- /docs/imgs/stack-management.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/stack-management.png -------------------------------------------------------------------------------- /docs/imgs/sysmon-task-properties.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/sysmon-task-properties.png -------------------------------------------------------------------------------- /docs/imgs/sysvol.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/sysvol.jpg -------------------------------------------------------------------------------- /docs/imgs/task-complete.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/task-complete.png -------------------------------------------------------------------------------- /docs/imgs/task-status.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/task-status.png -------------------------------------------------------------------------------- /docs/imgs/task.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/task.png -------------------------------------------------------------------------------- /docs/imgs/testing-screenshots/delete.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/testing-screenshots/delete.png -------------------------------------------------------------------------------- /docs/imgs/testing-screenshots/shell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/testing-screenshots/shell.png 
-------------------------------------------------------------------------------- /docs/imgs/testing-screenshots/shell2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/testing-screenshots/shell2.png -------------------------------------------------------------------------------- /docs/imgs/testing-screenshots/shell3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/testing-screenshots/shell3.png -------------------------------------------------------------------------------- /docs/imgs/testing-screenshots/shell4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/testing-screenshots/shell4.png -------------------------------------------------------------------------------- /docs/imgs/testing-screenshots/shell5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/testing-screenshots/shell5.png -------------------------------------------------------------------------------- /docs/imgs/timefilter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/timefilter.jpg -------------------------------------------------------------------------------- /docs/imgs/timerange.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/timerange.jpg -------------------------------------------------------------------------------- /docs/imgs/timerange.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/timerange.png -------------------------------------------------------------------------------- /docs/imgs/troubleshooting-overview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/troubleshooting-overview.jpg -------------------------------------------------------------------------------- /docs/imgs/update-rules.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/update-rules.png -------------------------------------------------------------------------------- /docs/imgs/usersec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/usersec.png -------------------------------------------------------------------------------- /docs/imgs/verify.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/verify.png -------------------------------------------------------------------------------- 
/docs/imgs/winlogbeat-install.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/winlogbeat-install.png -------------------------------------------------------------------------------- /docs/imgs/winlogbeat-location.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/winlogbeat-location.png -------------------------------------------------------------------------------- /docs/imgs/winlogbeat-running.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/winlogbeat-running.png -------------------------------------------------------------------------------- /docs/imgs/winscp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisagov/LME/4d74a191f6ecdd3bbe8903b42001f6a2e0f6fae0/docs/imgs/winscp.jpg -------------------------------------------------------------------------------- /docs/markdown/agents/elastic-agent-management.md: -------------------------------------------------------------------------------- 1 | # Elastic Agent Management - Enrollment Guide 2 | 3 | This guide will walk you through the process of enrolling an Elastic agent. 4 | 5 | ## Steps to Enroll an Agent 6 | 7 | 1. **Access the Fleet Menu** 8 | - Open the LME dashboard: `https://{SERVER_IP}` 9 | - Password information can be found in the [README](/README.md#retrieving-passwords). 10 | - Open the "hamburger" menu icon in the top left (three horizontal lines) 11 | - Scroll down and select "Fleet" from the menu 12 | 13 | 2. **Add a New Agent** 14 | - Click on the "Add agent" button 15 | 16 | 3. **Select the Policy** 17 | - Ensure you select the appropriate policy for the agent 18 | - For example, choose "Endpoint Policy" if you're adding an endpoint device 19 | 20 | 4. **Enrollment Settings** 21 | - Keep the "Enroll in Fleet" option selected 22 | 23 | 5. **Choose the Agent Type** 24 | - Select the appropriate option based on your endpoint: 25 | - Linux Tar 26 | - Mac 27 | - Windows (ensure you run this in a PowerShell prompt with administrator privileges) 28 | 29 | 6. **Installation Command** 30 | - You will be presented with an installation command for the selected platform 31 | - Note: If you haven't added the LME certificates to your trusted store, you'll need to modify the command 32 | 33 | 7. **Modify the Command if Necessary (e.g., if certificates have not been added to the trusted store)** 34 | - Add `--insecure` at the end of the `./elastic-agent install` command 35 | - This is similar to clicking "continue to website" in a browser when you get a certificate warning 36 | - Example (a filled-in sketch also appears after this list): 37 | ``` 38 | ./elastic-agent install [-other-flags-youll-see] --insecure 39 | ``` 40 | 41 | - It should look like this screenshot: 42 | ![example-screenshot](/docs/imgs/insecure-powershell.png) 43 | 44 | 8. **Execute the Command** 45 | - Run the command on the desired host. 46 | - It is recommended to run each line individually so you can see the status of each command. The entire process will download an agent, unzip it, and install it. 47 | 48 | From Fleet you should now see the agent enrolled.
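For reference, a filled-in Linux (tar) enrollment command typically looks like the sketch below. The Fleet URL and enrollment token are placeholders; the real values are generated for you on the Fleet "Add agent" page, and `--insecure` is only needed if the LME certificates are not in your trusted store:
```bash
# Placeholder values: copy the real --url and --enrollment-token from the Fleet UI
sudo ./elastic-agent install \
  --url=https://<SERVER_IP>:8220 \
  --enrollment-token=<ENROLLMENT_TOKEN> \
  --insecure
```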
49 | 50 | # LME Elastic Agent Integration Example 51 | 52 | This guide will walk you through the process of adding a Windows integration to an agent policy in the LME system. 53 | 54 | ## Steps to Add Windows Integration 55 | 56 | 1. **Access Fleet and Agent Policies** 57 | - Open the LME dashboard: `https://{SERVER_IP}` 58 | - Password information can be found in the [Readme](/README.md#retrieving-passwords). 59 | - Open the "hamburger" menu icon in the top left (three horizontal lines) 60 | - Select "Fleet" from the menu 61 | - Click on "Agent policies" 62 | 63 | 2. **Select the Target Policy** 64 | - Choose the policy you want to add the integration to 65 | - For example, select "Endpoint Policy" 66 | 67 | 3. **Add Integration** 68 | - Click the "Add integration" button 69 | 70 | 4. **Choose Windows Integration** 71 | - From the list of available integrations, select "Windows" 72 | - Select "add Windows" 73 | 74 | 5. **Configure Windows Integration** 75 | - Scroll down to review the options available 76 | - You'll see various Windows logs and metrics that can be collected 77 | 78 | 6. **Customize Log Collection** 79 | - Review the options set to on or off 80 | - These options provide more choices for collecting Windows logs 81 | - Important note: If you have Sysmon installed on your endpoints, ensure "Sysmon Operational" is selected to collect Sysmon logs 82 | 83 | 7. **Configure Metrics Collection** 84 | **NOTE: BE CAREFUL WITH METRICS. RECOMMENDATION IS TO ONLY USE ON SERVERS OR OTHER IMPORTANT ENDPOINTS NEEDING LIVE METRICS. YOU MUST MANUALLY CLICK TO DISABLE.** 85 | - You can choose to collect various metrics from your Windows endpoints 86 | - Review and enable the metrics you're interested in monitoring 87 | 88 | 8. **Save and Deploy** 89 | - After configuring your desired options, save the integration 90 | - Deploy the changes to apply them to the agents using this policy 91 | 92 | ## Important Considerations 93 | 94 | - **Sysmon Integration**: If you're using Sysmon for enhanced logging, make sure to enable the Sysmon Operational log collection. 95 | - **Performance Impact**: Be mindful that collecting more logs and metrics may impact endpoint performance. Balance your monitoring needs with system resources. 96 | - **Regulatory Compliance**: Consider any regulatory requirements you may have when selecting which logs and metrics to collect. 97 | - **Storage Considerations**: More data collection means more storage usage. Ensure your LME system has adequate storage capacity. 98 | - **Review Regularly**: Periodically review your integration settings to ensure they still meet your needs and adjust as necessary. 99 | 100 | By following these steps, you can effectively add and configure the Windows integration to your chosen agent policy in the LME system, allowing for comprehensive logging of your Windows endpoints. 101 | 102 | Apply these same steps to future integrations such as Auditd for Linux. 103 | 104 | ## Troubleshooting Agent Setup: 105 | The Elastic agent has multiple debugging commands that can be run to troubleshoot installs. Please see the link [HERE](https://www.elastic.co/guide/en/fleet/current/elastic-agent-cmd-options.html). 106 | 107 | In addition, you can use this [link](https://www.elastic.co/guide/en/fleet/current/installation-layout.html) to navigate/find the directories for where Elastic agent is installed on the operating system. 
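As a starting point for troubleshooting, the following commands (run on the enrolled endpoint) are commonly useful; Linux paths and service names are shown here, so consult the installation-layout link above for other operating systems:

```bash
sudo elastic-agent status                # show the agent's state and its connection to Fleet
sudo elastic-agent diagnostics           # collect a diagnostics archive to inspect or attach to an issue
sudo systemctl status elastic-agent      # confirm the agent service is running on systemd-based hosts
```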
108 | 109 | If the command fails with an error involving a pipe file, the Elastic Endpoint service (a Windows service started by the agent) is in a failed state, and restarting the machine will most likely fix it; see this [link](https://discuss.elastic.co/t/windows-pipe-elastic-agent-system-access-is-denied/316344) for details. However, this isn't required if the agent is showing as healthy, only if you want to run other CLI agent debugging commands. 110 | -------------------------------------------------------------------------------- /docs/markdown/agents/wazuh-active-response.md: -------------------------------------------------------------------------------- 1 | # Example Setup for Wazuh Active Response 2 | 3 | This guide summarizes how to configure Wazuh's active response to defend against SSH brute-force attacks. 4 | 5 | ## Overview 6 | 7 | Wazuh can automatically block IP addresses attempting SSH brute-force attacks using its active response module. This feature executes scripts on monitored endpoints when specific triggers occur. 8 | 9 | ## Configuration Steps 10 | 11 | 1. **Verify Default Script**: 12 | - Check for the `firewall-drop` script in `/var/ossec/active-response/bin/` on Linux/Unix systems. 13 | 14 | 2. **Configure Command in wazuh_manager.conf**: Note that this command (`firewall-drop`) already exists, but you can create custom scripts in the active-response/bin path and add new commands to the configuration file at /opt/lme/config/wazuh_cluster/wazuh_manager.conf 15 | 16 | 17 | 18 | ```xml 19 | <command> 20 |   <name>firewall-drop</name> 21 |   <executable>firewall-drop</executable> 22 |   <timeout_allowed>yes</timeout_allowed> 23 | </command> 24 | ``` 25 | 26 | 3. **Set Up Active Response**: Look for the section that says "active-response options here" in the .conf file. Copy and paste the entire configuration below that commented-out line. You can continue to add more active response configs below that line. 27 | ```xml 28 | <active-response> 29 |   <command>firewall-drop</command> 30 |   <location>local</location> 31 |   <rules_id>5763</rules_id> 32 |   <timeout>180</timeout> 33 | </active-response> 34 | ``` 35 | - This configures a local response, triggering on rule 5763 (SSH brute-force detection), with a 180-second block. 36 | 37 | 4. **Restart Wazuh Manager**: 38 | ```bash 39 | podman restart lme-wazuh-manager 40 | ``` 41 | 42 | ## How It Works 43 | 44 | - When rule 5763 triggers (detecting SSH brute-force attempts), the `firewall-drop` script executes. 45 | - The script uses iptables to block the attacker's IP address for the specified timeout period. 46 | - Wazuh logs the action in `/var/ossec/logs/active-responses.log`. 47 | 48 | ## Monitoring 49 | 50 | - The Wazuh dashboard displays alerts when rule 5763 triggers and when an active response occurs. 51 | - The active response alert is typically associated with rule ID 651. These alerts will be displayed in Kibana in the Wazuh alerts dashboard. 52 | 53 | ## Testing 54 | 55 | 1. Use a tool like Hydra to simulate a brute-force attack, or attempt to SSH into the machine repeatedly until it triggers. You will need eight failed SSH attempts to trigger the brute-force rule (this can be adjusted manually in the ruleset). 56 | 2. Verify that the attacker's IP is blocked by attempting to ping the target machine. 57 | 58 | ## Custom Responses 59 | 60 | - You can create custom scripts for different actions. 61 | - For custom scripts, ensure you create corresponding rules to analyze the generated logs. 62 | 63 | This setup provides an automated defense against SSH brute-force attacks, enhancing the security of your Linux/Unix systems monitored by Wazuh.
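To confirm a response actually fired during testing, you can watch the active-response log from the LME host (a quick check, assuming the default LME container name):

```bash
# Tail the Wazuh active-response log inside the lme-wazuh-manager container
sudo -i podman exec -it lme-wazuh-manager tail -f /var/ossec/logs/active-responses.log
```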
64 | 65 | See a list of Wazuh Rules that trigger here: [Wazuh Ruleset](https://github.com/wazuh/wazuh/tree/master/ruleset/rules) 66 | 67 | Consult Wazuh Documentation for more on active response configuration. 68 | -------------------------------------------------------------------------------- /docs/markdown/endpoint-tools/install-auditd.md: -------------------------------------------------------------------------------- 1 | # Installing and Configuring Auditd on Linux Systems 2 | 3 | This guide will walk you through the process of installing auditd on Linux systems and configuring it with the rules provided by Neo23x0. 4 | 5 | ## Prerequisites 6 | 7 | - Root or sudo access to the Linux system 8 | - Internet connection to download necessary files 9 | 10 | ## Step 1: Install Auditd 11 | 12 | The installation process may vary depending on your Linux distribution. Here are instructions for common distributions: 13 | 14 | ### For Ubuntu/Debian: 15 | 16 | ```bash 17 | sudo apt update 18 | sudo apt install auditd audispd-plugins 19 | ``` 20 | 21 | ### For CentOS/RHEL: 22 | 23 | ```bash 24 | sudo yum install audit audit-libs 25 | ``` 26 | 27 | ### For Fedora: 28 | 29 | ```bash 30 | sudo dnf install audit 31 | ``` 32 | 33 | ## Step 2: Download Neo23x0 Audit Rules (These are used as an example you can write your own rules) 34 | 35 | 1. Open a terminal window. 36 | 2. Download the audit rules file: 37 | ```bash 38 | sudo curl -o /etc/audit/rules.d/audit.rules https://raw.githubusercontent.com/Neo23x0/auditd/master/audit.rules 39 | ``` 40 | 41 | ## Step 3: Configure Auditd 42 | 43 | 1. Open the main auditd configuration file: 44 | ```bash 45 | sudo nano /etc/audit/auditd.conf 46 | ``` 47 | 48 | 2. Review and adjust the settings as needed. 49 | 50 | 3. Save and close the file (in nano, press Ctrl+X, then Y, then Enter). 51 | 52 | ## Step 4: Load the New Rules 53 | 54 | 1. Load the new audit rules: 55 | ```bash 56 | sudo auditctl -R /etc/audit/rules.d/audit.rules 57 | ``` 58 | 59 | 2. Restart the auditd service: 60 | ```bash 61 | sudo service auditd restart 62 | ``` 63 | 64 | ## Step 5: Verify Installation and Rules 65 | 66 | 1. Check if auditd is running: 67 | ```bash 68 | sudo systemctl status auditd 69 | ``` 70 | 71 | 2. Verify that the rules have been loaded: 72 | ```bash 73 | sudo auditctl -l 74 | ``` 75 | 76 | ## Step 6: Test Audit Logging 77 | 78 | 1. Perform some actions that should trigger audit logs (e.g., accessing sensitive files, running specific commands). 79 | 80 | 2. Check the audit log for new entries: 81 | ```bash 82 | sudo ausearch -ts recent 83 | ``` 84 | 85 | ## Updating Audit Rules 86 | 87 | To update the audit rules in the future: 88 | 89 | 1. Download the latest `audit.rules` file from the Neo23x0 GitHub repository (or somewhere else). 90 | 2. Replace the existing file: 91 | ```bash 92 | sudo curl -o /etc/audit/rules.d/audit.rules https://raw.githubusercontent.com/Neo23x0/auditd/master/audit.rules 93 | ``` 94 | 3. Reload the rules and restart auditd: 95 | ```bash 96 | sudo auditctl -R /etc/audit/rules.d/audit.rules 97 | sudo service auditd restart 98 | ``` 99 | 100 | Adjust rules as needed to meet compliance requirements. 101 | 102 | You can now install the auditd elastic integration to collect auditd logs. 
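If you later want to layer your own rules on top of the Neo23x0 set, a minimal sketch looks like the following; the file name and watched path are only examples, so adjust them to your own compliance needs:

```bash
# Hypothetical example: watch /etc/passwd for writes and attribute changes, tagging matches with a key
echo '-w /etc/passwd -p wa -k passwd_changes' | sudo tee /etc/audit/rules.d/99-local.rules
sudo augenrules --load                    # merge everything under /etc/audit/rules.d and load it
sudo auditctl -l | grep passwd_changes    # confirm the new rule is active
```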
103 | 104 | ## Automated Installation Script 105 | 106 | For a more streamlined installation process, you can use the following bash script: 107 | 108 | ```bash 109 | #!/bin/bash 110 | 111 | set -e 112 | 113 | # Ensure the script is run as root 114 | if [ "$EUID" -ne 0 ]; then 115 | echo "Please run as root." 116 | exit 1 117 | fi 118 | 119 | # Inform the user that auditd is being installed 120 | echo "Installing and configuring auditd, please wait..." 121 | 122 | # Determine the OS ID 123 | if [ -f /etc/os-release ]; then 124 | . /etc/os-release 125 | OS_ID="$ID" 126 | else 127 | echo "Cannot determine the operating system." 128 | exit 1 129 | fi 130 | 131 | # Install auditd based on the OS 132 | case "$OS_ID" in 133 | ubuntu|debian) 134 | apt update > /dev/null 2>&1 135 | apt install -y auditd audispd-plugins > /dev/null 2>&1 136 | ;; 137 | centos|rhel) 138 | yum install -y audit > /dev/null 2>&1 139 | ;; 140 | fedora) 141 | dnf install -y audit > /dev/null 2>&1 142 | ;; 143 | *) 144 | echo "Unsupported OS: $OS_ID" 145 | exit 1 146 | ;; 147 | esac 148 | 149 | # Create the rules directory if it doesn't exist 150 | mkdir -p /etc/audit/rules.d > /dev/null 2>&1 151 | 152 | # Download the audit rules 153 | curl -o /etc/audit/rules.d/audit.rules https://raw.githubusercontent.com/Neo23x0/auditd/master/audit.rules > /dev/null 2>&1 154 | 155 | # Load the audit rules, suppressing output and errors 156 | augenrules --load > /dev/null 2>&1 157 | 158 | # Restart the auditd service, suppressing output 159 | systemctl restart auditd > /dev/null 2>&1 160 | 161 | # Notify the user of successful completion 162 | echo "auditd installed and rules applied successfully." 163 | ``` 164 | 165 | To use this script: 166 | 167 | 1. Save it to a file, e.g., `install_auditd.sh` 168 | 2. Make it executable: `chmod +x install_auditd.sh` 169 | 3. Run it with sudo: `sudo ./install_auditd.sh` 170 | -------------------------------------------------------------------------------- /docs/markdown/endpoint-tools/install-sysmon.md: -------------------------------------------------------------------------------- 1 | # Installing Sysmon on Windows Machines 2 | 3 | This guide will walk you through the process of installing Sysmon (System Monitor) on your Windows machine(s) using the SwiftOnSecurity configuration. 4 | 5 | ## Prerequisites 6 | - Administrative access to the Windows machine 7 | - Internet connection to download necessary files 8 | 9 | ## Step 1: Download Sysmon 10 | 1. Visit the official Microsoft Sysinternals Sysmon page: https://docs.microsoft.com/en-us/sysinternals/downloads/sysmon 11 | 2. Click on the "Download Sysmon" link to download the ZIP file 12 | 3. Extract the contents of the ZIP file to a folder on your computer (e.g., `C:\Sysmon`) 13 | 14 | ## Step 2: Download SwiftOnSecurity Configuration 15 | 1. Open a web browser and go to: https://github.com/SwiftOnSecurity/sysmon-config/blob/master/sysmonconfig-export.xml 16 | 2. Click the button to download raw content 17 | 3. Save the file into the Sysmon directory 18 | 19 | ## Step 3: Install Sysmon 20 | 1. Open an elevated Command Prompt (Run as Administrator) 21 | 2. Navigate to the folder where you extracted Sysmon: 22 | ``` 23 | cd C:\Sysmon 24 | ``` 25 | 3. Run the following command to install Sysmon with the SwiftOnSecurity configuration: 26 | ``` 27 | sysmon.exe -accepteula -i sysmonconfig-export.xml 28 | ``` 29 | 30 | ## Step 4: Verify Installation 31 | 1. Open Event Viewer (you can search for it in the Start menu) 32 | 2. 
Navigate to "Applications and Services Logs" > "Microsoft" > "Windows" > "Sysmon" > "Operational" 33 | 3. You should see events being logged by Sysmon 34 | 35 | ## Updating Sysmon Configuration 36 | To update the Sysmon configuration in the future: 37 | 1. Download the latest `sysmonconfig-export.xml` from the SwiftOnSecurity GitHub repository 38 | 2. Open an elevated Command Prompt 39 | 3. Navigate to the Sysmon folder 40 | 4. Run the following command: 41 | ``` 42 | sysmon.exe -c sysmonconfig-export.xml 43 | ``` 44 | 45 | ## Uninstalling Sysmon 46 | If you need to uninstall Sysmon: 47 | 1. Open an elevated Command Prompt 48 | 2. Navigate to the Sysmon folder 49 | 3. Run the following command: 50 | ``` 51 | sysmon.exe -u 52 | ``` 53 | 54 | ## Additional Notes 55 | - You can now enable sysmon log collection from the Windows elastic agent integration 56 | - Use a shared folder, SCCM, GPO's, or other tools to install on large quantities of machines 57 | -------------------------------------------------------------------------------- /docs/markdown/logging-guidance/cloud.md: -------------------------------------------------------------------------------- 1 | # Logging Made Easy in the cloud 2 | 3 | This page answers some common FAQ about deploying LME in the cloud. 4 | 5 | ## Does LME run in the cloud? 6 | Yes, LME is a client-server model and accommodates both on-premises and cloud deployments, allowing organizations to host LME on local or cloud service provider (CSP) infrastructure. 7 | 8 | 9 | 10 | ## Deploying LME in the cloud for on prem systems: 11 | For LME agents to talk to LME in the cloud, you'll need to ensure the clients you want to monitor can communicate through: 1) the cloud firewall and 2) LME' own server firewall. 12 | 13 | ![cloud firewall](/docs/imgs/lme-cloud.jpg) 14 | 15 | The easiest way is to make sure you can access these LME server ports from the on premise client: 16 | - Wazuh Agent ([Agent Enrollment Requirements Documentation](https://documentation.wazuh.com/current/user-manual/agent/agent-enrollment/requirements.html)): 1514,1515 17 | - Elastic Agent ([Agent Install Documentation](https://www.elastic.co/guide/en/elastic-stack/current/installing-stack-demo-self.html#install-stack-self-elastic-agent)): 8220 (fleet commands), 9200 (input to elasticsearch) 18 | 19 | You'll need to make sure your Cloud firewall is setup to allow the ports above. On Azure, network security groups (NSG) run a firewall on your virtual machine;s network interfaces. You'll need to update your LME virtual machine's rules to allow inbound connections on the agent ports. Azure has a detailed guide for how to add security rules [here](https://learn.microsoft.com/en-us/azure/virtual-network/manage-network-security-group?tabs=network-security-group-portal#create-a-security-rule). 
20 | 21 | ##### ***We highly suggest you do not open ANY port globally; restrict access based on your clients' IP addresses or subnets.*** 22 | 23 | On LME, you'll want to make sure you have the firewall disabled (if you're using the cloud firewall as the main firewall): 24 | ``` 25 | lme-user@ubuntu:~$ sudo ufw status 26 | Status: inactive 27 | ``` 28 | or that you have the firewall rules enabled: 29 | ``` 30 | lme-user@ubuntu:~$ sudo ufw status 31 | Status: active 32 | 33 | To Action From 34 | -- ------ ---- 35 | 1514 ALLOW Anywhere 36 | 1515 ALLOW Anywhere 37 | 22 ALLOW Anywhere 38 | 8220 ALLOW Anywhere 39 | 1514 (v6) ALLOW Anywhere (v6) 40 | 1515 (v6) ALLOW Anywhere (v6) 41 | 22 (v6) ALLOW Anywhere (v6) 42 | 8220 (v6) ALLOW Anywhere (v6) 43 | ``` 44 | 45 | You can add the above ports to ufw via the following commands: 46 | ``` 47 | sudo ufw allow 1514 48 | sudo ufw allow 1515 49 | sudo ufw allow 8220 50 | sudo ufw allow 9200 51 | ``` 52 | If you want to use the Wazuh API, you'll also need to set up port 55000 to be allowed in: 53 | ``` 54 | sudo ufw allow 55000 55 | ``` 56 | 57 | In addition, you'll need to set up rules to forward traffic to the container network and allow traffic to run on the container network: 58 | ``` 59 | ufw route allow in on eth0 out on podman1 to any port 443,1514,1515,5601,8220,9200 proto tcp 60 | ufw route allow in on podman1 61 | ``` 62 | There's a helpful Stack Overflow article on [Configuring UFW for podman on port 443](https://stackoverflow.com/questions/70870689/configure-ufw-for-podman-on-port-443). 63 | Your `podman1` interface name may be different. Check your network interfaces by running the following command to see whether your interface is also called podman1: 64 | ``` 65 | sudo -i podman network inspect lme | jq 'map(select(.name == "lme")) | map(.network_interface) | .[]' 66 | ``` 67 | 68 | Your rules can be dumped and shown like so: 69 | ``` 70 | root@ubuntu:~# ufw show added 71 | Added user rules (see 'ufw status' for running firewall): 72 | ufw allow 22 73 | ufw allow 1514 74 | ufw allow 1515 75 | ufw allow 8220 76 | ufw route allow in on eth0 out on podman1 to any port 443,1514,1515,5601,8220,9200 proto tcp 77 | ufw allow 443 78 | ufw allow in on podman1 79 | ufw allow 9200 80 | root@ubuntu:~# 81 | ``` 82 | 83 | ### Deploying LME for cloud infrastructure: 84 | 85 | Every cloud setup is different. As long as the LME server is on the same network and able to talk to the machines you want to monitor, your deployment should run smoothly. 86 | 87 | ## Other firewall rules 88 | You may also want to access Kibana from outside the cloud. You'll want to make sure you either allow port `5601` or port `443` inbound from the cloud firewall AND the virtual machine firewall. 89 | 90 | ``` 91 | root@ubuntu:/opt/lme# sudo ufw allow 443 92 | Rule added 93 | Rule added (v6) 94 | ``` 95 | 96 | ``` 97 | root@ubuntu:/opt/lme# sudo ufw status 98 | Status: active 99 | 100 | To Action From 101 | -- ------ ---- 102 | 22 ALLOW Anywhere 103 | 1514 ALLOW Anywhere 104 | 1515 ALLOW Anywhere 105 | 8220 ALLOW Anywhere 106 | 443 ALLOW Anywhere 107 | 22 (v6) ALLOW Anywhere (v6) 108 | 1514 (v6) ALLOW Anywhere (v6) 109 | 1515 (v6) ALLOW Anywhere (v6) 110 | 8220 (v6) ALLOW Anywhere (v6) 111 | 443 (v6) ALLOW Anywhere (v6) 112 | ``` 113 | 114 | ### Don't lock yourself out AND enable the firewall 115 | 116 | You also don't want to lock yourself out of SSH, so make sure to enable port 22!
117 | ``` 118 | sudo ufw allow 22 119 | ``` 120 | 121 | Enable ufw: 122 | ``` 123 | sudo ufw enable 124 | ``` 125 | 126 | 127 | -------------------------------------------------------------------------------- /docs/markdown/logging-guidance/filtering.md: -------------------------------------------------------------------------------- 1 | # Filtering logs: 2 | 3 | Sometimes a log is not particularly useful, or an aspect of LME could prove overly verbose (e.g., [Dashboard spamming events](https://github.com/cisagov/LME/issues/22)). We try our best to make everything useful but cannot predict every possibility, since all environments will be different. So, to enable users to make LME more useful (and hopefully commit their own pull requests back with updates :) ), we document here how you can filter out logs in the: 4 | 5 | 1. Dashboard 6 | 2. Host logging utility (e.g. winlogbeat) 7 | 3. Serverside (e.g. logstash) 8 | 9 | Have fun reading and applying these concepts. 10 | 11 | ## Dashboard: 12 | 13 | The example below shows how users can apply a filter to a search, and save it with a dashboard, to filter out unneeded Windows event log [4624](https://www.ultimatewindowssecurity.com/securitylog/encyclopedia/event.aspx?eventID=4624) entries whose TargetUserName field contains a `$`. 14 | ``` 15 | { 16 | "bool": { 17 | "filter": [ 18 | { 19 | "match_phrase": { 20 | "event.code": "4624" 21 | } 22 | } 23 | ], 24 | "must_not": [ 25 | { 26 | "regexp": { 27 | "winlog.event_data.TargetUserName": ".*$.*" 28 | } 29 | } 30 | ] 31 | } 32 | } 33 | ``` 34 | 35 | To add the filter: 36 | 1. Click `Add filter` 37 | 2. Click `Edit as DSL` to add a regexp filter 38 | 39 | Users can find many resources here and more relevant examples on Stack Overflow: 40 | - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html 41 | - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax 42 | - https://www.elastic.co/guide/en/elasticsearch/reference/current/regexp-syntax.html 43 | -------------------------------------------------------------------------------- /docs/markdown/logging-guidance/retention.md: -------------------------------------------------------------------------------- 1 | # Retention Settings 2 | 3 | By default, LME will configure an index lifecycle policy that will delete 4 | indexes based on estimated disk usage. Initially, 80% of the disk will be used 5 | for the indices, with an assumption that a day of logs will use 1 GB of disk 6 | space. 7 | 8 | If you wish to adjust the number of days retained, do this in Kibana. 9 | First, select the `lme_ilm_policy` from the "Index Lifecycle Policies" 10 | list: 11 | 12 | ![Retention settings](/docs/imgs/retention_pics/retention_1.png) 13 | 14 | Next, scroll to the bottom of the settings page and adjust the "Delete phase" 15 | setting as appropriate. 16 | 17 | ![Retention delete phase settings](/docs/imgs/extra_beats_pics/update-retention.png) 18 | 19 | Users must ensure that the retention period is appropriate for the 20 | disk space available. If disk space is exhausted then the solution will 21 | experience performance issues and new logs will not be recorded. By default, 22 | Elasticsearch will not allocate shards to any nodes that are using 85% or more 23 | of the available disk space.
See [the Elasticsearch 24 | documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/disk-allocator.html) 25 | (the `cluster.routing.allocation.disk.watermark.low` setting in particular) for 26 | more information. 27 | 28 | Click the "Save policy" button and the new setting will apply to the LME indices. 29 | Before saving, ensure that the new policy will not result in unwanted data loss: 30 | reducing the retention period will cause existing logs older than the new period to be deleted. 31 | -------------------------------------------------------------------------------- /docs/markdown/maintenance/Encryption_at_rest_option_for_users.md: -------------------------------------------------------------------------------- 1 | # Encryption at rest option for users 2 | 3 | To ensure encryption at rest for all data managed by Elastic Cloud Enterprise, the hosts running Elastic Cloud Enterprise must be configured with disk-level encryption, such as dm-crypt. Elastic Cloud Enterprise does not implement encryption at rest out of the box. 4 | 5 | Because Elastic does not implement data encryption at rest out of the box, it offers users a paid option beyond disk-level encryption. This option is called X-Pack. 6 | 7 | The X-Pack security feature provides a secure and compliant way to protect data in Elasticsearch. 8 | 9 | X-Pack has a 30-day trial, and once the trial is over, users might need to acquire a Platinum license to keep using some of the X-Pack features, including data encryption. For more information, see: 10 | 11 | [Elastic Security Considerations - Encryption](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-securing-considerations.html#:~:text=To%20ensure%20encryption%20at%20rest,encrypted%20at%20rest%20as%20well) 12 | 13 | [Deep Dive into X-Pack Elasticsearch: Advanced Features and Implementation](https://opster.com/guides/elasticsearch/security/x-pack/#:~:text=X%2DPack%20is%20an%20Elastic,features%20you%20want%20to%20use) 14 | -------------------------------------------------------------------------------- /docs/markdown/maintenance/backups.md: -------------------------------------------------------------------------------- 1 | # Backing up LME Logs 2 | 3 | Logs are backed up using the built-in Elastic facilities. Out of the box, 4 | Elasticsearch supports backing up to filesystems. This is the only approach 5 | supported by LME. Other backup destinations are possible but require 6 | separate plugins that are not supported by LME. 7 | 8 | ## Approach 9 | 10 | Backups are created using Elasticsearch snapshots. The initial snapshot will 11 | contain all of the current logs but subsequent backups will only contain changes 12 | since the last snapshot was taken. It is therefore possible to take regular 13 | backups without a significant effect on the system's performance and without 14 | consuming large amounts of disk space. 15 | 16 | ## Setting up a backup schedule 17 | 18 | ### Create a filesystem repository 19 | 20 | LME sets up a Podman volume called `lme_backups` so that backups can be saved outside the container. 21 | 22 | **Note**: If backup storage becomes an issue, the LME team plans to add documentation soon on how to manage the size and storage location of backups. 23 | 24 | You will need to create a repository for Elastic to use, which can be done through the Kibana interface.
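As an aside, if you prefer the Kibana Dev Tools console to the UI steps that follow, a shared-filesystem repository pointing at the same backing path can be registered with a single request (the repository name below is just an example):

```bash
PUT _snapshot/lme-backups
{
  "type": "fs",
  "settings": {
    "location": "/usr/share/elasticsearch/backups"
  }
}
```

Otherwise, continue with the Kibana interface as follows.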
25 | 26 | First navigate to the "Snapshot and Restore" page under the `Stack Management` tab: 27 | 28 | ![Snapshot and Restore](/docs/imgs/backup_pics/snapshot_and_restore.png) 29 | 30 | Then create a repository by clicking the "Register a repository" button and 31 | filling in the following screens: 32 | 33 | ![Repository one](/docs/imgs/backup_pics/repository_1.png) 34 | 35 | The repository is named "LME-backups," but you can choose any name. Select the "Shared file system" repository type. 36 | 37 | On the next screen, the file system location should be set to 38 | `/usr/share/elasticsearch/backups`. The other fields can be left with the default values, or modified as required. 39 | 40 | ![Repository two](/docs/imgs/backup_pics/repository_2.png) 41 | 42 | The repository will be created and will show in the list on the `Stack Management` 43 | screen: 44 | 45 | ![Repository three](/docs/imgs/backup_pics/repository_3.png) 46 | 47 | ### Create a snapshot schedule policy 48 | 49 | You then need to create a policy for the backups. Select the "Policies" tab and 50 | then click the "Create a policy" button as seen below: 51 | 52 | ![Policy One](/docs/imgs/backup_pics/policy_1.png) 53 | 54 | On the next screen, pick a name for your new policy ("lme-snapshots" in this 55 | example). For the snapshot name the value `` will create 56 | files with the prefix `lme-daily` and with the current date as a suffix. Make 57 | sure your new repository is selected, and then configure a schedule in line with 58 | your backup policy. Elasticsearch uses incremental snapshots for its backup, 59 | and so only the previous day's logs will need to be snapshotted, which will help 60 | minimize the performance impact. 61 | 62 | ![Policy Two](/docs/imgs/backup_pics/policy_2.png) 63 | 64 | Leave the next screen with its default values and click "Next": 65 | 66 | ![Policy Three](/docs/imgs/backup_pics/policy_3.png) 67 | 68 | If desired, configure the next screen with the relevant retention settings based on your available disk space and your backup policy and then click "Next": 69 | 70 | ![Policy Four](/docs/imgs/backup_pics/policy_4.png) 71 | 72 | Review the new policy and click "Create policy". 73 | 74 | ![Policy Five](/docs/imgs/backup_pics/policy_5.png) 75 | 76 | To test the new policy or to create the initial snapshot, you can 77 | select the "Run now" option for the policy on the polices tab: 78 | 79 | ![Policy Six](/docs/imgs/backup_pics/policy_6.png) 80 | 81 | ## Backup management 82 | 83 | Snapshots will now be periodically written to the volume `lme_backups`. 84 | 85 | You can find the location on disk of these backups at: 86 | ```bash 87 | sudo -i 88 | podman volume mount lme_backups 89 | cd /var/lib/containers/storage/volumes/lme_backups/_data 90 | ls 91 | ``` 92 | 93 | it should look somehting like this: 94 | ```bash 95 | root@ubuntu:/var/lib/containers/storage/volumes/lme_backups/_data# ls 96 | index-0 index.latest indices meta-cuPUnpl1S0Sx8IkPIWLoEA.dat snap-cuPUnpl1S0Sx8IkPIWLoEA.dat 97 | ``` 98 | 99 | You can now save/backup/etc... however you would like 100 | 101 | **Make sure to unmount when done** 102 | ```bash 103 | podman volume unmount lme_backups 104 | ``` 105 | 106 | Manage these in line with your backup policies and processes. 107 | 108 | # Restoring a backup: 109 | 110 | These steps will walk you through restoring backups assuming you have a new Elasticsearch instance with old log backups from a previous LME. 111 | If you wish to restore a backup follow the below steps: 112 | 113 | 1. 
Navigate to Stack-Management -> Snapshot and Restore -> Repositories: 114 | ![NavBar](/docs/imgs/nav-bar.png) 115 | ![snaprestore](/docs/imgs/snap-restore.png) 116 | 2. Register a new repository following the same directions as above to reference the mounted host directory at the container path. [Create a filesystem repository](#Create-a-filesystem-repository) 117 | 3. Verify the Repository is connected by hitting the `Verify Repository` button. You should see a similar prompt circled in blue below: 118 | ![verify](/docs/imgs/verify.png) 119 | 4. Under snapshots you should now see your old lme backup in the `LMEBackups` Repository: 120 | ![restore](/docs/imgs/restore.png) 121 | 5. Restore using the logistics tab -> settings -> Review 122 | ![logistics](/docs/imgs/logistics.png) 123 | 6. If you encounter the below error, you will need to change the index settings to successfully restore your backups. You can either: (1) rename the indexes on the `logistics` page, OR (2) close your current indexes that have name conflicts. Follow below for both options 124 | ![error](/docs/imgs/error.png) 125 | 126 | ## Rename the indexes on import: 127 | 1. Usually all you'll want is the winlogbeat data, we can rename that as shown below. Make sure you uncheck `restore aliases` otherwise Elastic will think you're restoring multiple indices (the old and the new renamed one). 128 | ![restore-details](/docs/imgs/restore-details.png) 129 | 2. Restore just like in the above directions. 130 | 131 | 132 | ## Close current indexes to enable importing the old: 133 | 1. Navigate to `Stack-Management -> Data -> Index Management` on the navbar. 134 | 2. Close the conflicting index that currently exists: 135 | ![close](/docs/imgs/close-index.png) 136 | 137 | -------------------------------------------------------------------------------- /docs/markdown/maintenance/certificates.md: -------------------------------------------------------------------------------- 1 | # Certificates 2 | 3 | The LME installation makes use of a number of TLS certificates to protect communications between the server components and agents, and also secures the connections between Elasticsearch and Kibana. 4 | By default the installation will create certificates and this documentation describes how to modify and update the cert store. 5 | 6 | ## Regenerating Self-Signed Certificates 7 | The easiest way to do this is to delete the `lme_certs` volume, and restart lme.service: 8 | 9 | This is destructive and not recommended, but there could be cases. 10 | ```bash 11 | sudo -i podman volume rm lme_certs 12 | sudo systemctl restart lme.service 13 | ``` 14 | 15 | ## Using Your Own Certificates 16 | You can use certificates signed by an existing root CA as part of the LME install by generating certificates manually with the correct settings and placing these within the required directory inside the LME folder. 17 | 18 | **NOTE: The default supported method of LME installation is to use the automatically created self-signed certificates, and we will be unable to support any problems that arise from generating the certificates manually incorrectly.** 19 | 20 | ### Certificate Creation 21 | If you create certificates ensure their subject alt names allow for the ips/dns entries listed below, as well as the ips/domains you'll be connecting to the service as: 22 | ```bash 23 | root@ubuntu:~# cat /opt/lme/config/setup/instances.yml | head -n 30 24 | # Add host IP address / domain names as needed. 
25 | 26 | instances: 27 | - name: "elasticsearch" 28 | dns: 29 | - "lme-elasticsearch" 30 | - "localhost" 31 | ip: 32 | - "127.0.0.1" 33 | 34 | - name: "kibana" 35 | dns: 36 | - "lme-kibana" 37 | - "localhost" 38 | ip: 39 | - "127.0.0.1" 40 | 41 | - name: "fleet-server" 42 | dns: 43 | - "lme-fleet-server" 44 | - "localhost" 45 | ip: 46 | - "127.0.0.1" 47 | 48 | - name: "wazuh-manager" 49 | dns: 50 | - "lme-wazuh-manager" 51 | - "localhost" 52 | ip: 53 | - "127.0.0.1" 54 | ``` 55 | 56 | For example, the new kibana cert would need to support the above alternative names. You can also ensure its setup properly by viewing the current cert (assuming you've already mounted the `lme_certs` podman volume). 57 | ```bash 58 | root@ubuntu:~$ cat /var/lib/containers/storage/volumes/lme_certs/_data/kibana/kibana.crt | openssl x509 -text | grep -i Alternative -A 1 59 | X509v3 Subject Alternative Name: 60 | DNS:lme-kibana, IP Address:127.0.0.1, DNS:localhost 61 | ``` 62 | 63 | 64 | ### Certificate Locations 65 | All the certs are stored in the lme_certs volume. Here is how to list/change/modify the contents: 66 | 67 | ```bash 68 | root@ubuntu:$ podman volume mount lme_certs 69 | /var/lib/containers/storage/volumes/lme_certs/_data 70 | root@ubuntu:$ cd /var/lib/containers/storage/volumes/lme_certs/_data/ 71 | root@ubuntu:/var/lib/containers/storage/volumes/lme_certs/_data$ tree 72 | . 73 | ├── ACCOUNTS_CREATED 74 | ├── ca 75 | │   ├── ca.crt 76 | │   └── ca.key 77 | ├── ca.zip 78 | ├── caddy 79 | │   ├── caddy.crt 80 | │   └── caddy.key 81 | ├── certs.zip 82 | ├── curator 83 | │   ├── curator.crt 84 | │   └── curator.key 85 | ├── elasticsearch 86 | │   ├── elasticsearch.chain.pem 87 | │   ├── elasticsearch.crt 88 | │   └── elasticsearch.key 89 | ├── fleet-server 90 | │   ├── fleet-server.crt 91 | │   └── fleet-server.key 92 | ├── kibana 93 | │   ├── kibana.crt 94 | │   └── kibana.key 95 | ├── logstash 96 | │   ├── logstash.crt 97 | │   └── logstash.key 98 | └── wazuh-manager 99 | ├── wazuh-manager.crt 100 | └── wazuh-manager.key 101 | ``` 102 | 103 | To edit the certs/replace the certs, copy the new desired certificate and key to the above location on the disk: 104 | ``` 105 | cp ~/new_kibana_cert.crt /var/lib/containers/storage/volumes/lme_certs/_data/kibana.crt 106 | cp ~/new_kibana_key.key /var/lib/containers/storage/volumes/lme_certs/_data/kibana.key 107 | ``` 108 | 109 | ## Migrating from Self-Signed Certificates 110 | 111 | You can migrate from the default self-signed certificates to manually generated certificates at a later date, for example to move to enterprise certificates post-installation after an initial testing period. 112 | 113 | **NOTE: The default supported method of LME installation is to use the automatically created self-signed certificates, and we will be unable to support any problems that arise from generating the certificates manually incorrectly.** 114 | 115 | Simply replace the certs above within the given container for the given service that you would like LME to use. If the certs are signed, ensure you also include the root ca in the appropriate location as well. 116 | -------------------------------------------------------------------------------- /docs/markdown/maintenance/index-management.md: -------------------------------------------------------------------------------- 1 | # Elasticsearch Index Lifecycle Overview 2 | 3 | Elasticsearch uses Index Lifecycle Management (ILM) to manage data over time. There are four phases: 4 | 5 | 1. 
Hot Phase 6 | - Newest data 7 | - Most active: frequent updates and searches 8 | - Needs fastest access 9 | 10 | 2. Warm Phase 11 | - Older data 12 | - Less active: fewer updates, still searched 13 | - Can be on slightly slower storage 14 | 15 | 3. Cold Phase 16 | - Oldest data 17 | - Rarely accessed, no updates 18 | - Can be on slowest storage 19 | 20 | 4. Delete Phase 21 | - Data no longer needed 22 | - Removed from the system 23 | 24 | Data moves through these phases based on what is called the Index Lifecycle Policy. 25 | 26 | # Creating a Lifecycle Policy 27 | 28 | ## Create Lifecycle Policy for Wazuh Indexes in Elasticsearch 29 | 30 | 1. Login to Kibana and go to Menu -> Dev Tools 31 | 32 | 2. Create an ILM policy by copying and pasting the following code and then pressing the run button that looks like a 'play' symbol: 33 | 34 | ```bash 35 | PUT _ilm/policy/wazuh_alerts_cleanup_policy 36 | { 37 | "policy": { 38 | "phases": { 39 | "hot": { 40 | "min_age": "0ms", 41 | "actions": {} 42 | }, 43 | "delete": { 44 | "min_age": "30d", 45 | "actions": { 46 | "delete": {} 47 | } 48 | } 49 | } 50 | } 51 | } 52 | ``` 53 | 54 | It will look like so: 55 | 56 | ![image](https://github.com/user-attachments/assets/962c3f8e-4a7b-4037-beaf-ea2e597fbe2d) 57 | 58 | 3. Perform the same steps for the following snippets of code: 59 | 60 | ```bash 61 | PUT _index_template/wazuh_alerts_template 62 | { 63 | "index_patterns": ["wazuh-alerts-4.x-*"], 64 | "template": { 65 | "settings": { 66 | "index.lifecycle.name": "wazuh_alerts_cleanup_policy" 67 | } 68 | } 69 | } 70 | ``` 71 | 72 | ```bash 73 | PUT wazuh-alerts-4.x-*/_settings 74 | { 75 | "index.lifecycle.name": "wazuh_alerts_cleanup_policy" 76 | } 77 | ``` 78 | 79 | This will create a policy, create a template that applies this policy to all new indices, and then also apply the policy to existing Wazuh indices. 80 | 81 | **NOTE: This is an example that will delete wazuh indices after 30 days. Adjust as needed.** 82 | 83 | ## Elastic Endpoint Lifecyle policy 84 | 85 | Your Elastic agent logs are managed by a policy called "logs" 86 | 87 | 1. Navigate to Index Lifecycle policies, toggle the switch for "Include managed system policies" and then search for "logs" 88 | 89 | 2. Click to edit this policy. You will see warnings that editing a managed policy can break Kibana. Assuming you set your phases properly, you can ignore this warning. 90 | 91 | 3. By default, the setup "rolls over" when an index is 30 days old or exceeds 50 GB. Rollover renames the index and creates a new one to manage shard size without deleting the previous index I.E. logs-00001 rolls over to logs-00002. 00001 remains, its just not 'active' 92 | 93 | 4. Set your Hot, Warm, Cold phase as you see fit. 94 | 95 | 5. After you turn on "Cold Phase" you must hit the trash can switch to turn on the delete phase. 96 | 97 | 6. After you apply these changes to your policy please allow it some time to take effect on all indices. 98 | 99 | **Note**: You can also just completely skip these steps and manually delete indices from the UI as you see fit / when needed. 100 | 101 | **Note**: By default your rollover policy is set for 30 days. Do not set your 'delete' phase to be shorter than your rollover phase. 
You need your active indices to rollover into inactive indices before you delete them.** 102 | -------------------------------------------------------------------------------- /docs/markdown/maintenance/upgrading.md: -------------------------------------------------------------------------------- 1 | # Upgrading 2 | This page serves as a landing page for future upgrading when we release new versions. 3 | 4 | ## Upgrading for Existing LME 1.4 Users: 5 | Currently the only upgrade path is from 1.4 -> 2.0. [Click here for more information on upgrading from LME 1.4](/scripts/upgrade/README.md). 6 | 7 | ### Other LME 1.X versions: 8 | Before upgrading to 2.0, we highly recommend upgrading to 1.4 if you are using an earlier version, as that is the only supported upgrade path defined in the link above. 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /docs/markdown/maintenance/vulnerability-scan-setup.md: -------------------------------------------------------------------------------- 1 | # Adding the Wazuh Vulnerability Index in Kibana 2 | 3 | ## Steps to Create the Data View 4 | 5 | 1. Navigate to Stack Management 6 | - Click the hamburger menu (☰) in the top left corner 7 | - Select "Stack Management" 8 | 9 | 2. Access Data Views 10 | - Under the Kibana section, click "Data Views" 11 | 12 | 3. Create New Data View 13 | - Click the "Create new data view" button 14 | - Configure the following settings: 15 | - Name: **wazuh-states-vulnerabilities-wazuh-manager** 16 | - Index pattern: `wazuh-states-vulnerabilities-wazuh-manager` 17 | - Timestamp field: `vulnerability.detected_at` 18 | - Click "Save data view to Kibana" 19 | 20 | ## Viewing Vulnerability Results 21 | 22 | - Once configured, you can view results in the Wazuh vulnerability dashboard 23 | - **Important Note**: After installing your first agent, please allow one to two hours for: 24 | - Wazuh to download the vulnerability database 25 | - The initial vulnerability scan to complete -------------------------------------------------------------------------------- /docs/markdown/maintenance/wazuh-configuration.md: -------------------------------------------------------------------------------- 1 | # Wazuh Configuration Management 2 | 3 | ## Managing Wazuh Configuration File 4 | 5 | The Wazuh manager configuration file in the LME setup is located at: 6 | 7 | ``` 8 | /opt/lme/config/wazuh_cluster/wazuh_manager.conf 9 | ``` 10 | 11 | This file is mounted into the Wazuh manager container running in Podman. Here's how to manage this configuration: 12 | 13 | ### Editing the Configuration File 14 | 15 | 1. Open the file with your preferred text editor (you may need sudo privileges): 16 | ``` 17 | sudo nano /opt/lme/config/wazuh_cluster/wazuh_manager.conf 18 | ``` 19 | 20 | 2. Make the necessary changes to the configuration file. Some important sections you might want to modify include: 21 | - `<global>`: Global settings for Wazuh 22 | - `<ruleset>`: Define rules and decoders 23 | - `<syscheck>`: File integrity monitoring settings 24 | - `<rootcheck>`: Rootkit detection settings 25 | - `<wodle>`: Wazuh modules configuration 26 | 27 | 3. Save the changes and exit the editor. 28 | 29 | ### Applying Configuration Changes 30 | 31 | After modifying the configuration file, you need to restart the Wazuh manager service for the changes to take effect: 32 | 33 | 1. 
Restart the Wazuh manager container: 34 | ``` 35 | podman restart lme-wazuh-manager 36 | ``` 37 | 38 | or with systemctl 39 | 40 | ``` 41 | sudo systemctl restart lme-wazuh-manager.service 42 | ``` 43 | 44 | 2. Check the status of the Wazuh manager to ensure it started successfully: 45 | ``` 46 | podman logs lme-wazuh-manager 47 | ``` 48 | 49 | This command will validate your configuration and report any errors. 50 | 51 | ### Best Practices 52 | 53 | 1. Always backup the configuration file before making changes: 54 | ``` 55 | sudo cp /opt/lme/config/wazuh_cluster/wazuh_manager.conf /opt/lme/config/wazuh_cluster/wazuh_manager.conf.bak 56 | ``` 57 | 58 | 2. Use comments in the configuration file to document your changes. 59 | 60 | 3. Test configuration changes in a non-production environment before applying them to your production setup. 61 | 62 | 4. Regularly review and update your Wazuh configuration to ensure it aligns with your current security needs and policies. 63 | 64 | Remember to consult the [official Wazuh documentation](https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/index.html) for detailed information on all available configuration options. -------------------------------------------------------------------------------- /docs/markdown/prerequisites.md: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | 3 | 4 | ## What kind of IT skills do I need to install LME? 5 | 6 | Users with a background in systems or network administration can download LME. If you have ever… 7 | 8 | * Installed a Windows server and connected it to an Active Directory domain 9 | * Changed firewall rules 10 | * Installed a Linux operating system and logged in over SSH 11 | 12 | … then you are likely to have the skills to install LME! 13 | 14 | We estimate that you should allow half an hour to complete the entire installation process. We have automated steps where possible and made the instructions as detailed as possible. 15 | 16 | The following time table of real recorded times will provide you a reference of how long the installation may take to complete. 17 | 18 | ### Estimated Installation Times 19 | 20 | | Milestones | Time | Timeline | 21 | | ------------- | ------------- | ------------- | 22 | | Download LME | 0:31.49 | 0:31.49 | 23 | | Set Environment | 0:35.94 | 1:06.61 | 24 | | Install Ansible | 1:31.94 | 2:38.03 | 25 | | Installing LME Ansible Playbook | 4:03.63 | 6:41.66 | 26 | | All Containers Active | 6:41.66 | 13:08.92 | 27 | | Accessing Elastic | 0:38.97 | 13:47.60 | 28 | | Post-Install Ansible Playbook | 2:04.34 | 15:51.94 | 29 | | Deploy Linux Elastic Agent | 0:49.95 | 16:41.45 | 30 | | Deploy Windows Elastic Agent | 1:32.00 | 18:13.40 | 31 | | Deploy Linux Wazuh Agent | 1:41.99 | 19:55.34 | 32 | | Deploy Windows Wazuh Agent | 1:55.00 | 21:51.22 | 33 | | Download LME Zip on Windows | 2:22.43 | 24:13.65 | 34 | | Install Sysmon | 1:04.34 | 25:17.99 | 35 | | Windows Integration | 0:39.93 | 25:57.27 | 36 | 37 | ## High level overview diagram of the LME system architecture 38 | 39 | ![diagram](/docs/imgs/lme-architecture-v2.png) 40 | 41 | Please see the [ReadMe](/README.md#Diagram) for a detailed description of of LME's architecture and its components. 42 | 43 | ## How much does LME cost? 44 | 45 | Creative Commons 0 ("CC0") license. Government contractors, working for CISA, provide portions with rights to use, modify, and redistribute under this statement and the current license structure. 
All other portions, including new submissions, fall under the Apache License, Version 2.0 46 | This project (scripts, documentation, and so on) is licensed under the [Apache License 2.0 and Creative Commons 0](../../LICENSE). 47 | 48 | The design uses open software which comes at no cost to the user. CISA will ensure that no paid software licenses are needed above standard infrastructure costs (With the exception of Windows Operating System Licensing). 49 | 50 | Users must pay for hosting, bandwidth and time; for an estimate of server specs that might be needed, see this [blogpost from elasticsearch](https://www.elastic.co/blog/benchmarking-and-sizing-your-elasticsearch-cluster-for-logs-and-metrics) then use your estimated server specs to determine a price for an on premise or cloud deployment. 51 | 52 | 53 | ## Scaling the solution 54 | To keep LME simple, our guide only covers single server setups. Considering the differences across environments and scaling needs, we cannot provide an estimate of server resources beyond single server setups. 55 | It’s possible to scale the solution to multiple event collectors and ELK nodes, but that will require more experience with the technologies involved. However, we plan to publish documentation for scaling LME in the future. 56 | 57 | Please see the above blogpost from elastic for discussion on how to scale an elastic stack cluster. 58 | 59 | ## Required infrastructure 60 | 61 | To begin installing LME, you will need access to the following servers or you will need to create them: 62 | 63 | - A client machine (or multiple client machines) you would like to monitor. 64 | - An Ubuntu linux 22.04 server. 65 | 66 | We will install our database (Elasticsearch) and dashboard software on this machine. This is all taken care of through Podman containers. 67 | 68 | ### Minimum Hardware Requirements: 69 | - CPU: 2 processor cores, 4+ recommended 70 | - MEMORY: 16GB RAM, (32GB+ recommended by [Elastic](https://www.elastic.co/guide/en/cloud-enterprise/current/ece-hardware-prereq.html)), 71 | - STORAGE: dedicated 128GB storage for ELK (not including storage for OS and other files) 72 | This is estimated to only support ~17 clients worth of log streaming data per day. Elasticsearch will automatically purge old logs to make space for new ones. We **highly** suggest more storage than 128GB for any enterprise network greater than this. 73 | 74 | If you need to run LME with less than 16GB of RAM or minimal hardware, please follow our troubleshooting guide to configure Podman quadlets for reduced memory usage. We recommend setting Elasticsearch to an 8GB limit and Kibana to a 4GB limit. You can find the guide [here](/docs/markdown/reference/troubleshooting.md#memory-in-containers-need-more-ramless-ram-usage). 75 | 76 | #### Confirm your system meets the minimum hardware requirements: 77 | **CPU**: To check the number of CPUs, run the following command: 78 | ```bash 79 | $ lscpu | egrep 'CPU\(s\)' 80 | ``` 81 | **Memory**: To check your available memory, run this command, look under the "free" column: 82 | ```bash 83 | $ free -h 84 | total used free shared buff/cache available 85 | Mem: 31Gi 6.4Gi 22Gi 4.0Mi 2.8Gi 24Gi 86 | Swap: 0B 0B 0B 87 | ``` 88 | 89 | **Storage**: To check available hardware storage, typically the /dev/root will be your main filesystem. 
The number of gigabytes available is in the Avail column 90 | ```bash 91 | $ df -h 92 | Filesystem Size Used Avail Use% Mounted on 93 | /dev/root 124G 13G 112G 11% / 94 | ``` 95 | 96 | ## Where to install the servers 97 | 98 | Servers can be either on premise, in a public cloud, or in a private cloud. It is your choice, but you'll need to consider how to network between the clients and servers. 99 | 100 | ## What firewall rules are needed? 101 | Please see our cloud documentation for a discussion on firewalls [LME in the Cloud](/docs/markdown/loggging-guidance/cloud.md). 102 | 103 | You must ensure that the client machine you want to monitor can reach the main LME ports as described in the ReadMe [Required Ports section](/README.md#required-ports). -------------------------------------------------------------------------------- /docs/markdown/reference/architecture.md: -------------------------------------------------------------------------------- 1 | # Architecture: 2 | LME runs on Ubuntu 22.04 and leverages Podman containers for security, performance, and scalability. We’ve integrated Wazuh, Elastic, and ElastAlert open source tools to provide log management, endpoint security monitoring, alerting, and data visualization capabilities. This modular, flexible architecture supports efficient log storage, search, and threat detection, and enables you to scale as your logging needs evolve. 3 | 4 | ### Diagram: 5 | 6 | ![diagram](/docs/imgs/lme-architecture-v2.png) 7 | 8 | ### Containers: 9 | Containerization allows each component of LME to run independently, increasing system security, improving performance, and making troubleshooting easier. 10 | 11 | LME uses Podman as its container engine because it is more secure (by default) against container escape attacks than other engines like Docker. It's far more debug and programmer friendly. We’re making use of Podman’s unique features, such as Quadlets (Podman's systemd integration) and User Namespacing, to increase system security and operational efficiency. 12 | 13 | LME uses these containers: 14 | 15 | - **Setup**: Runs `/config/setup/init-setup.sh` based on the configuration of DNS defined in `/config/setup/instances.yml`. The script will create a certificate authority (CA), underlying certificates for each service, and initialize the admin accounts for Elasticsearch(user:`elastic`) and Kibana(user:`kibana_system`). 16 | - **Elasticsearch**: Runs LME's database and indexes all logs. 17 | - **Kibana**: The front end for querying logs, visualizing data, and managing fleet agents. 18 | - **Elastic Fleet-Server**: Executes an [elastic agent ](https://github.com/elastic/elastic-agent) in fleet-server mode. Coordinates elastic agents to gather client logs and status. Configuration is inspired by the [elastic-container](https://github.com/peasead/elastic-container) project. 19 | - **Wazuh-Manager**: Allows LME to deploy and manage Wazuh agents. 20 | - Wazuh (open source) gives EDR (Endpoint Detection Response) with security dashboards to cover the security of all of the machines. 21 | - **LME-Frontend** (*coming in a future release*): Will host an API and GUI that unifies the architecture behind one interface. 22 | 23 | ### Required Ports: 24 | Ports required are as follows: 25 | - Elasticsearch: *9200* 26 | - Kibana: *443,5601* 27 | - Wazuh: *1514,1515,1516,55000,514* 28 | - Agent: *8220* 29 | 30 | **Note**: For Kibana, 5601 is the default port. We've also set kibana to listen on 443 as well. 
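Once LME is up, a quick way to sanity-check that these ports answer from a client machine (or from the server itself) is something like the following; replace the placeholder address, and note that Kibana presents a self-signed certificate by default:

```bash
# Illustrative connectivity checks against the ports listed above
nc -zv <LME-SERVER-IP> 8220    # Elastic agent enrollment / Fleet
nc -zv <LME-SERVER-IP> 9200    # Elasticsearch input from agents
nc -zv <LME-SERVER-IP> 1514    # Wazuh agent traffic
curl -k https://<LME-SERVER-IP>   # Kibana over HTTPS (-k because of the self-signed certificate)
```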
31 | 32 | ### Agents and Agent Management: 33 | LME leverages both Wazuh and Elastic agents providing more comprehensive logging and security monitoring across various log sources. The agents gather critical data from endpoints and send it back to the LME server for analysis, offering organizations deeper visibility into their security posture. We also make use of the Wazuh Manager and Elastic Fleet for agent orchestration and management. 34 | 35 | - **Wazuh Agents**: Enables Endpoint Detection and Response (EDR) on client systems, providing advanced security features like intrusion detection and anomaly detection. For more information, see [Wazuh's agent documentation](https://github.com/wazuh/wazuh-agent). 36 | - **Wazuh Manager**: Responsible for managing Wazuh Agents across endpoints, and overseeing agent registration, configuration, and data collection, providing centralized control for monitoring security events and analyzing data. 37 | - **Elastic Agents**: Enhance log collection and management, allowing for greater control and customization in how data is collected and analyzed. Agents also feature a vast collection of integrations for many log types/applications. For more information, see [Elastic's agent documentation](https://github.com/elastic/elastic-agent). 38 | - **Elastic Fleet**: Manages Elastic Agents across your infrastructure, providing centralized control over agent deployment, configuration, and monitoring. It simplifies the process of adding and managing agents on various endpoints. ElasticFleet also supports centralized updates and policy management. 39 | 40 | 41 | ### Alerting: 42 | LME has setup [ElastAlert](https://elastalert2.readthedocs.io/en/latest/index.html), an open-source alerting framework, to automate alerting based on data stored in Elasticsearch. It monitors Elasticsearch for specific patterns, thresholds, or anomalies, and generates alerts when predefined conditions are met. This provides proactive detection of potential security incidents, enabling faster response and investigation. ElastAlert’s flexible rule system allows for custom alerts tailored to your organization’s security monitoring needs, making it a critical component of the LME alerting framework. 43 | 44 | ### Log Storage and Search: 45 | 46 | As the core component for log search and storage, [Elasticsearch](https://www.elastic.co/elasticsearch) indexes and stores logs and detections collected from Elastic and Wazuh Agents, allowing for fast, real-time querying of security events. Elasticsearch enables users to search and filter large datasets efficiently, providing a powerful backend for data analysis and visualization in Kibana. Its scalability and flexibility make it essential for handling the high-volume log data generated across different endpoints within LME's architecture. 47 | 48 | ### Data Visualization and Querying: 49 | [Kibana](https://www.elastic.co/kibana) is the visualization and analytics interface in LME, providing users with tools to visualize and monitor log data stored in Elasticsearch. It enables the creation of custom dashboards and visualizations, allowing users to easily track security events, detect anomalies, and analyze trends. Kibana's intuitive interface supports real-time insights into the security posture of an organization, making it an essential tool for data-driven decision-making in LME’s centralized logging and security monitoring framework. 
-------------------------------------------------------------------------------- /docs/markdown/reference/configuration.md: -------------------------------------------------------------------------------- 1 | # General notes on custom configuration for LME 2 | The configuration files are located in /config/. These steps will guide you through setting up LME. 3 | 4 | ## certificates and user passwords: 5 | - instances.yml defines the certificates to be created. 6 | - Shell scripts will initialize accounts and generate certificates. They run from the quadlet definitions lme-setup-accts and lme-setup-certs. 7 | 8 | ## Podman Quadlet Configuration 9 | - Quadlet configuration for containers is located in /quadlet/. These map to the root systemd unit files but execute as non-privileged users. -------------------------------------------------------------------------------- /docs/markdown/reference/faq.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | ## 1. What Is CISA’s Logging Made Easy (LME)? 4 | 5 | LME is a no-cost log management solution for small- to medium-sized organizations with limited resources that would otherwise have little to no functionality to detect attacks. LME offers centralized logging for Linux, macOS, and Windows operating systems, enabling proactive threat detection and enhanced security by allowing organizations to monitor their networks, identify users, and actively analyze Sysmon data to quickly detect potential malicious activity. 6 | 7 | ## 2. What makes LME unique? 8 | 9 | LME stands out as an accessible, open source log management and threat detection solution developed by CISA to support organizations with limited resources. By integrating Elastic and Wazuh in a secure, containerized stack, it provides endpoint security and real-time alerting without the complexity or cost of traditional SIEMs. Designed with customizable dashboards and Secure by Design principles, LME offers a user-friendly, effective solution to enhance visibility and strengthen threat detection. 10 | 11 | ## 3. What software drives LME? 12 | 13 | LME is powered by Elastic Stack (for log management, search, and visualization), Wazuh (for endpoint detection and response), and Podman (for containerization). This open source stack ensures transparency, flexibility and scalability while providing enhanced threat detection and customizable dashboards. 14 | 15 | ## 4. Which operating systems can use LME? 16 | 17 | LME 2.0 supports Windows, Linux, and macOS operating systems. Elastic and Wazuh agents enable compatibility across these platforms, ensuring broad coverage for monitoring and logging. While Wazuh agents also support Solaris, AIX, and HP-UX operating systems, CISA has not tested LME on endpoints running these operating systems. 18 | 19 | ## 5. Who can use LME? 20 | 21 | While intended for small to medium-sized organizations with limited resources, anyone can download and use LME. Reference [LME 2.0 Prerequisite documentation](/docs/markdown/prerequisites.md) for more details on required infrastructure and hardware including CPU, memory, and storage requirements. 22 | 23 | ## 6. Can LME run in the cloud? 24 | 25 | LME supports both on-premises and cloud deployments, allowing organizations to host LME on local or cloud service provider (CSP) infrastructure. 26 | 27 | ## 7. Does LME 2.0 require a new install or an update to existing installs? 28 | 29 | Both new and existing users must complete a full install of LME 2.0. 
30 | 31 | LME has an upgrade process from v1.4 -> 2.0. The upgrade uninstalls 1.4 and installs 2.0, and will reintegrate old dashboards and data into the new 2.0 deployment. Check out our [Upgrading docs](/scripts/upgrade/README.md) for more information on upgrading from an older version of LME to LME 2.0. 32 | 33 | ## 8. How do I download LME? 34 | 35 | Detailed installation and download steps can be found in the [Installation section of our ReadMe](https://github.com/cisagov/LME/tree/lme-2-docs?tab=readme-ov-file#installing-lme). 36 | 37 | ## 9. In light of LME 2.0, will older versions of LME stop working? 38 | 39 | While CISA recommends upgrading to LME 2.0, users can continue using older versions of LME; however, CISA will not support older versions. 40 | 41 | 42 | ## 10. How do I transition/migrate from older versions to LME 2.0 while retaining my log history? 43 | 44 | For existing LME users, [click here](/scripts/upgrade) for easy instructions on transferring log history from previous versions. LME will automatically reintegrate your log history and data. 45 | 46 | ## 11. Can I transfer my customized dashboards? If so, how? 47 | 48 | Yes. First export your old dashboards, then in your Elastic instance go to Stack Management > Kibana > Saved Objects, click Import, and select the custom dashboard ndjson file to import it. 49 | 50 | ## 12. Are there new system requirements for LME 2.0? 51 | 52 | Although system requirements are mostly the same for LME 2.0, we do have hardware and infrastructure recommendations in our [LME 2.0 Prerequisite documentation](/docs/markdown/prerequisites.md). 53 | 54 | ## 13. Where can I receive further support? 55 | 56 | For further support with LME 2.0, users can explore the following options: 57 | - Report LME issues via the GitHub 'Issues' tab at the top of the page or by clicking [GitHub Issues](https://github.com/cisagov/lme/issues) 58 | - Visit GitHub 'Discussions' to check if your issue has been addressed or start a new thread 59 | - Directly email CyberSharedServices@cisa.dhs.gov for other questions or comments 60 | 61 | ## 14. Where can I find additional resources? 62 | 63 | Please visit [CISA’s LME website](https://www.cisa.gov/resources-tools/services/logging-made-easy) for additional resources. 64 | 65 | # Other Questions: 66 | 67 | ## Basic troubleshooting 68 | You can find basic troubleshooting steps in the [Troubleshooting Guide](troubleshooting.md). 69 | 70 | ## Finding your LME version (and the component versions) 71 | When reporting an issue or suggesting improvements, it is important to include the versions of all the components where possible. This helps confirm that the issue has not already been fixed in a newer version. 72 | 73 | ### Linux Server 74 | * Podman: on the Linux server type ```podman --version``` 75 | * Linux: on the Linux server type ```cat /etc/os-release``` 76 | * LME: show the contents of ```/opt/lme/config``` (please redact private data) 77 | 78 | ## Reporting a bug 79 | To report an issue with LME, please use the GitHub 'issues' tab at the top of the (GitHub) page or click [GitHub Issues](https://github.com/cisagov/lme/issues). 80 | 81 | ## Questions about individual installations 82 | Please visit [GitHub Discussions](https://github.com/cisagov/lme/discussions) to see if your issue has been addressed before.
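To gather the details above in one pass before filing an issue, a short sketch like the following can help (it assumes a default install with the configuration under `/opt/lme/config`; redact anything sensitive before sharing):

```bash
# Collect component versions for a bug report or discussion post.
podman --version
grep -E '^(NAME|VERSION)=' /etc/os-release
ls -la /opt/lme/config        # list the LME config; redact private data before posting
```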
83 | 84 | -------------------------------------------------------------------------------- /docs/markdown/reference/passwords.md: -------------------------------------------------------------------------------- 1 | # Password Encryption: 2 | Ansible-vault is used to enable password encryption, securely storing all LME user and service user passwords at rest 3 | We do submit a hash of the password to Have I Been Pwned to check to see if it is compromised: [READ MORE HERE](https://haveibeenpwned.com/FAQs), but since they're all randomly generated this should be rare. 4 | 5 | ### Where Are Passwords Stored?: 6 | ```bash 7 | # Define user-specific paths 8 | USER_VAULT_DIR="/etc/lme/vault" 9 | PASSWORD_FILE="/etc/lme/pass.sh" 10 | ``` 11 | 12 | ### Grabbing Passwords: 13 | To view the appropriate service user password run the following commands: 14 | ```bash 15 | #script: 16 | $CLONE_DIRECTORY/scripts/extract_secrets.sh -p #to print 17 | 18 | #add them as variables to your current shell 19 | source $CLONE_DIRECTORY/scripts/extract_secrets.sh #without printing values 20 | source $CLONE_DIRECTORY/scripts/extract_secrets.sh -q #with no output 21 | ``` 22 | 23 | ### Manually Setting Up Passwords and Accessing Passwords **Unsupported**: 24 | **These steps are not fully supported by CISA and are left if others would like to support this in their environment** 25 | 26 | Run the password_management.sh script: 27 | ```bash 28 | lme-user@ubuntu:~/LME-TEST$ sudo -i ${PWD}/scripts/password_management.sh -h 29 | -i: Initialize all password environment variables and settings 30 | -s: set_user: Set user password 31 | -p: Manage Podman secret 32 | -l: List Podman secrets 33 | -h: print this list 34 | ``` 35 | 36 | A cli one liner to grab passwords (this also demonstrates how we're using Ansible-vault in extract_secrets.sh): 37 | ```bash 38 | #where wazuh_api is the service user whose password you want: 39 | USER_NAME=wazuh_api 40 | sudo -i ansible-vault view /etc/lme/vault/$(sudo -i podman secret ls | grep $USER_NAME | awk '{print $1}') 41 | ``` 42 | -------------------------------------------------------------------------------- /docs/markdown/reference/security-model.md: -------------------------------------------------------------------------------- 1 | 2 | # Logging Made Easy (LME) Security Model 3 | 4 | This document describes LME's security model from the perspective of the LME user. 5 | It will help users understand the security model and make informed decisions about how to deploy and manage LME given the constraints and assumptions in LME's design. 6 | 7 | ## Operating System: 8 | LME has been tested on Ubuntu 22.04, but in theory should support any unix operating system that can install the dependencies listed in the ReadMe. We assume your operating system is kept up to date and the linux kernel is up to date and patched. 9 | Failing to patch your operating system could leave your security infrastructure vulnerable. 10 | In addition, if a side channel or denial of service (DoS) attack is ever discovered at the operating system level, LME considers these attacks out of scope for something we can reasonably secure against. 11 | 12 | ## Users: 13 | 1. **Root**: Every Linux operating system has a root user. As the root user, ensure least privilege access to root following lockdown/hardening best practices (e.g. disabling root login, securing administrator access, disabling root over ssh [See More Details](https://wiki.archlinux.org/title/Security#Restricting_root)). 14 | 2. **Administrators (i.e. 
those with sudo access)**: LME runs all its architecture through administrator services, so anyone with administrator access can access all LME data. Ensure only trusted users are given access to the `sudo` group. Administrators can start/stop LME services and also manage the service user passwords. Administrators control the master password that protects each service user's password. 15 | 3. **Container User**: These users run processes within the LME service containers and should only be managed through 'podman exec'. Their passwords are either initialized or locked, and they execute within their own user namespace. They are mostly abstracted away from the typical LME administrator. For more information, see [User Namespaces](https://www.man7.org/conf/meetup/understanding-user-namespaces--Google-Munich-Kerrisk-2019-10-25.pdf) and [Podman User Namespaces](https://www.redhat.com/sysadmin/rootless-podman-user-namespace-modes). 16 | 4. **Service User**: These are the user/password combinations that administer, access, and update LME services via their respective APIs. All Service User passwords are stored in individual Ansible Vault files and encrypted using the master password. Each Service User's password is only decrypted as a Podman secret shared into its container via the environment. The users are: `elastic`, `kibana_system`, and `wazuh-wui`. 17 | 18 | ## Services Containerized: 19 | All services that make up LME (as documented in our [diagram](https://github.com/cisagov/LME/blob/release-2.0.0/docs/imgs/lme-architecture-v2.jpg)) are configured to execute in Podman containers started via systemd services using Podman's internal quadlet orchestration system. 20 | The quadlets are installed into the system administrator's directory `/etc/containers/systemd/` and will start up under root's privileges (similar to other systemd root services). 21 | 22 | This ensures least privilege throughout the LME architecture: 23 | 1. The master password file (used to encrypt service user passwords at rest) is owned by root. 24 | 2. Each Service User password is encrypted with the above master password, and only the required service user password files are shared to each respective container. 25 | 3. Even in a full container escape (where an adversary can execute code on the host, outside of the container), the root UID of each service container is an unprivileged UID on the host, so the adversary cannot gain access to anything (files, network, passwords, etc.) beyond what they already had access to inside the container.
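One way to see this isolation in practice is to compare the user a process appears to be inside a container with the host UID it actually runs under. This is a hedged sketch; container names and the high UID ranges come from the quadlet files and may differ in your deployment:

```bash
# The container's view of its users vs. the host UIDs they map to
# (host UIDs come from the UserNS ranges, e.g. 165536+, not root):
sudo podman top lme-elasticsearch user huser

# The raw user-namespace mapping for the container's init process
# (container UID, host UID, range) -- the host UIDs are unprivileged:
sudo cat /proc/$(sudo podman inspect -f '{{.State.Pid}}' lme-elasticsearch)/uid_map
```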
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /quadlet/lme-backups.volume: -------------------------------------------------------------------------------- 1 | [Unit] 2 | PartOf=lme.service 3 | After=lme.service 4 | 5 | [Service] 6 | 7 | 8 | [Volume] 9 | VolumeName=lme_backups 10 | User=165536 11 | Group=165536 12 | Driver=local 13 | -------------------------------------------------------------------------------- /quadlet/lme-elastalert.container: -------------------------------------------------------------------------------- 1 | # lme-elastalert.container 2 | [Unit] 3 | Description=Elastalert Service 4 | After=lme-elasticsearch.service 5 | Requires=lme-elasticsearch.service 6 | PartOf=lme.service 7 | 8 | [Service] 9 | Restart=always 10 | LimitNOFILE=655360 11 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 12 | 13 | [Install] 14 | WantedBy=default.target lme.service 15 | 16 | [Container] 17 | ContainerName=lme-elastalert2 18 | Environment=ES_HOST=lme-elasticsearch ES_PORT=9200 ES_USERNAME=elastic 19 | EnvironmentFile=/opt/lme/lme-environment.env 20 | Secret=elastic,type=env,target=ES_PASSWORD 21 | HostName=elastalert2 22 | Image=localhost/elastalert2:LME_LATEST 23 | Network=lme 24 | PodmanArgs=--network-alias lme-elastalert2 25 | Volume=lme_elastalert2_logs:/opt/elastalert/logs 26 | Volume=/opt/lme/config/elastalert2/rules:/opt/elastalert/rules:ro 27 | Volume=/opt/lme/config/elastalert2/misc:/opt/elastalert/misc:ro 28 | Volume=/opt/lme/config/elastalert2/config.yaml:/opt/elastalert/config.yaml:ro 29 | Volume=lme_certs:/etc/wazuh-manager/certs:ro 30 | Volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt:ro 31 | UserNS=auto:uidmapping=0:177728:3048,gidmapping=0:177728:3048 32 | #TODO: add a health check command 33 | #HealthCmd=CMD-SHELL curl -I -s --cacert config/certs/ca/ca.crt https://localhost:5601 | grep -q 'HTTP/1.1 302 Found' 34 | -------------------------------------------------------------------------------- /quadlet/lme-elasticsearch.container: -------------------------------------------------------------------------------- 1 | # lme-elasticsearch.container 2 | [Unit] 3 | Description=Elasticsearch Container Service 4 | Requires=lme-network.service lme-setup-certs.service 5 | After=lme-network.service lme-setup-certs.service 6 | PartOf=lme.service 7 | 8 | [Service] 9 | Restart=always 10 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 11 | TimeoutStartSec=5400 12 | 13 | [Install] 14 | WantedBy=default.target lme.service 15 | 16 | [Container] 17 | ContainerName=lme-elasticsearch 18 | #TODO: set discovery mode/cluster.name via environment 19 | Environment=node.name=lme-elasticsearch cluster.name=LME bootstrap.memory_lock=true discovery.type=single-node xpack.security.enabled=true xpack.security.http.ssl.enabled=true xpack.security.http.ssl.key=certs/elasticsearch/elasticsearch.key xpack.security.http.ssl.certificate=certs/elasticsearch/elasticsearch.chain.pem xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt xpack.security.http.ssl.verification_mode=certificate xpack.security.http.ssl.client_authentication=optional xpack.security.transport.ssl.enabled=true xpack.security.transport.ssl.key=certs/elasticsearch/elasticsearch.key xpack.security.transport.ssl.certificate=certs/elasticsearch/elasticsearch.crt xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt xpack.security.transport.ssl.verification_mode=certificate 
xpack.security.transport.ssl.client_authentication=optional xpack.license.self_generated.type=basic 20 | Secret=elastic,type=env,target=ELASTIC_PASSWORD 21 | Secret=kibana_system,type=env,target=ELASTICSEARCH_PASSWORD 22 | Secret=kibana_system,type=env,target=KIBANA_PASSWORD 23 | EnvironmentFile=/opt/lme/lme-environment.env 24 | Image=localhost/elasticsearch:LME_LATEST 25 | Network=lme 26 | PodmanArgs= --network-alias lme-elasticsearch --health-interval=2s 27 | PublishPort=9200:9200 28 | Ulimit=memlock=-1:-1 29 | Volume=lme_certs:/usr/share/elasticsearch/config/certs 30 | Volume=lme_esdata01:/usr/share/elasticsearch/data 31 | Volume=lme_backups:/usr/share/elasticsearch/backups 32 | Volume=/opt/lme/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro 33 | Notify=healthy 34 | HealthCmd=CMD-SHELL curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials' 35 | User=elasticsearch 36 | UserNS=auto:uidmapping=0:165536:3048,gidmapping=0:165536:3048 37 | #UserNS=auto 38 | -------------------------------------------------------------------------------- /quadlet/lme-esdata01.volume: -------------------------------------------------------------------------------- 1 | [Unit] 2 | PartOf=lme.service 3 | After=lme.service 4 | 5 | [Service] 6 | 7 | 8 | [Volume] 9 | VolumeName=lme_esdata01 10 | User=165536 11 | Group=165536 12 | Driver=local 13 | -------------------------------------------------------------------------------- /quadlet/lme-fleet-server.container: -------------------------------------------------------------------------------- 1 | # lme-fleet-server.container 2 | [Unit] 3 | Description=Fleet Container Service 4 | Requires=lme-elasticsearch.service 5 | After=lme-elasticsearch.service lme-kibana.service 6 | PartOf=lme.service 7 | ConditionPathExists=/opt/lme/FLEET_SETUP_FINISHED 8 | 9 | [Service] 10 | Restart=always 11 | TimeoutStartSec=5400 12 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 13 | 14 | [Install] 15 | WantedBy=default.target lme.service 16 | 17 | [Container] 18 | ContainerName=lme-fleet-server 19 | Environment=FLEET_ENROLL=1 FLEET_SERVER_POLICY_ID=fleet-server-policy FLEET_SERVER_ENABLE=1 KIBANA_FLEET_SETUP=1 KIBANA_HOST=https://lme-kibana:5601 FLEET_URL=https://lme-fleet-server:8220 FLEET_SERVER_ELASTICSEARCH_HOST=https://lme-elasticsearch:9200 FLEET_CA=/certs/ca/ca.crt FLEET_SERVER_CERT=/certs/fleet-server/fleet-server.crt FLEET_SERVER_CERT_KEY=/certs/fleet-server/fleet-server.key FLEET_SERVER_ELASTICSEARCH_CA=/certs/ca/ca.crt KIBANA_FLEET_CA=/certs/ca/ca.crt NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt ELASTICSEARCH_HOST=https://lme-elasticsearch:9200 ELASTICSEARCH_HOSTS=https://lme-elasticsearch:9200 ES_HOSTS=https://lme-elasticsearch:9200 ELASTIC_AGENT_ELASTICSEARCH_HOST=https://lme-elasticsearch:9200 20 | EnvironmentFile=/opt/lme/lme-environment.env 21 | Secret=elastic,type=env,target=KIBANA_FLEET_PASSWORD 22 | Image=localhost/elastic-agent:LME_LATEST 23 | Network=lme 24 | HostName=lme-fleet-server 25 | PodmanArgs=--network-alias lme-fleet-server --requires 'lme-elasticsearch,lme-kibana' 26 | PublishPort=8220:8220 27 | Volume=lme_certs:/certs:ro 28 | Volume=lme_fleet_data:/usr/share/elastic-agent 29 | UserNS=auto:uidmapping=0:171632:3048,gidmapping=0:171632:3048 30 | #TODO: fix this, need to check if its ready before polling API 31 | #HealthCmd=CMD-SHELL curl -s --cacert /certs/ca/ca.crt https://localhost:8220/api/status | grep '"status":"HEALTHY"' 32 | #Notify=healthy 33 | 
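# A hedged operational note: Quadlet turns each of these .container/.volume/.network files into a
# systemd unit named after the file (e.g. lme-fleet-server.container -> lme-fleet-server.service),
# all grouped under lme.service via PartOf=. Assuming that convention, routine checks on the host
# look like:
#
#   sudo systemctl status lme.service lme-elasticsearch.service lme-kibana.service lme-fleet-server.service
#   sudo journalctl -u lme-fleet-server.service -f          # follow one service's logs
#   sudo podman ps --format '{{.Names}}\t{{.Status}}'       # confirm container health
#   sudo systemctl daemon-reload && sudo systemctl restart lme.service   # pick up quadlet edits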
-------------------------------------------------------------------------------- /quadlet/lme-kibana.container: -------------------------------------------------------------------------------- 1 | # lme-kibana.container 2 | [Unit] 3 | Description=Kibana Container Service 4 | Requires=lme-setup-accts.service lme-elasticsearch.service lme-kibanadata-volume.service 5 | After=lme-setup-accts.service lme-elasticsearch.service lme-kibanadata-volume.service 6 | PartOf=lme.service 7 | 8 | [Install] 9 | WantedBy=default.target lme.service 10 | 11 | [Service] 12 | Restart=always 13 | TimeoutStartSec=5400 14 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 15 | 16 | [Container] 17 | ContainerName=lme-kibana 18 | Environment=SERVER_NAME=lme-kibana ELASTICSEARCH_HOSTS=https://lme-elasticsearch:9200 ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt SERVER_SSL_ENABLED=true SERVER_SSL_CERTIFICATE=config/certs/kibana/kibana.crt SERVER_SSL_KEY=config/certs/kibana/kibana.key SERVER_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt NODE_OPTIONS=--max-old-space-size=4096 19 | Secret=kibana_system,type=env,target=ELASTICSEARCH_PASSWORD 20 | EnvironmentFile=/opt/lme/lme-environment.env 21 | Image=localhost/kibana:LME_LATEST 22 | Network=lme 23 | PodmanArgs= --network-alias lme-kibana --requires lme-elasticsearch --health-interval=2s 24 | PublishPort=5601:5601,443:5601 25 | Volume=lme_certs:/usr/share/kibana/config/certs 26 | Volume=lme_kibanadata:/usr/share/kibana/data 27 | Volume=/opt/lme/config/kibana.yml:/usr/share/kibana/config/kibana.yml 28 | Volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt:ro 29 | HealthCmd=CMD-SHELL curl -I -s --cacert config/certs/ca/ca.crt https://localhost:5601 | grep -q 'HTTP/1.1 302 Found' 30 | Notify=healthy 31 | UserNS=auto:uidmapping=0:168584:3048,gidmapping=0:168584:3048 32 | -------------------------------------------------------------------------------- /quadlet/lme-kibanadata.volume: -------------------------------------------------------------------------------- 1 | [Unit] 2 | PartOf=lme.service 3 | After=lme.service 4 | 5 | [Service] 6 | 7 | [Volume] 8 | VolumeName=lme_kibanadata 9 | User=169584 10 | Group=169584 11 | Driver=local 12 | -------------------------------------------------------------------------------- /quadlet/lme-setup-accts.container: -------------------------------------------------------------------------------- 1 | # lme-elasticsearch-security-setup.container 2 | [Unit] 3 | Requires=lme-network.service lme-setup-certs.service lme-elasticsearch.service 4 | After=lme-network.service lme-setup-certs.service lme-elasticsearch.service 5 | PartOf=lme.service 6 | 7 | [Service] 8 | Type=oneshot 9 | RemainAfterExit=yes 10 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | ContainerName=lme-setup-accts 17 | EnvironmentFile=/opt/lme/lme-environment.env 18 | Secret=elastic,type=env,target=ELASTIC_PASSWORD 19 | Secret=kibana_system,type=env,target=KIBANA_PASSWORD 20 | Exec=/bin/bash /usr/share/elasticsearch/config/setup/acct-init.sh 21 | Image=localhost/elasticsearch:LME_LATEST 22 | Network=lme 23 | PodmanArgs=--network-alias lme-setup --health-interval=2s 24 | Volume=lme_certs:/usr/share/elasticsearch/config/certs 25 | Volume=/opt/lme/config/setup:/usr/share/elasticsearch/config/setup 26 | User=0 27 | #match permissions so we can write to the lme_certs volume 28 | 
UserNS=container:lme-elasticsearch 29 | 30 | -------------------------------------------------------------------------------- /quadlet/lme-setup-certs.container: -------------------------------------------------------------------------------- 1 | # lme-elasticsearch-security-setup.container 2 | [Unit] 3 | Requires=lme-network.service lme-esdata01-volume.service lme-kibanadata-volume.service 4 | After=lme.service lme-network.service lme-esdata01-volume.service lme-kibanadata-volume.service 5 | PartOf=lme.service 6 | 7 | [Service] 8 | Type=oneshot 9 | RemainAfterExit=yes 10 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 11 | 12 | [Install] 13 | WantedBy=default.target lme.service 14 | 15 | [Container] 16 | ContainerName=lme-setup-certs 17 | EnvironmentFile=/opt/lme/lme-environment.env 18 | Secret=elastic,type=env,target=ELASTIC_PASSWORD 19 | Secret=kibana_system,type=env,target=KIBANA_PASSWORD 20 | Exec=/bin/bash /usr/share/elasticsearch/config/setup/init-setup.sh 21 | Image=localhost/elasticsearch:LME_LATEST 22 | Network=lme 23 | PodmanArgs=--network-alias lme-setup --health-interval=2s 24 | Volume=lme_certs:/usr/share/elasticsearch/config/certs 25 | Volume=lme_esdata01:/usr/share/elasticsearch/data 26 | Volume=/opt/lme/config/setup:/usr/share/elasticsearch/config/setup 27 | User=0 28 | #UserNS=auto:uidmapping=165536:165536:3048,gidmapping=165536:165536:3048 29 | UserNS=auto 30 | -------------------------------------------------------------------------------- /quadlet/lme-wazuh-manager.container: -------------------------------------------------------------------------------- 1 | # lme-wazuh-manager.container 2 | [Unit] 3 | Description=Wazuh Container Service 4 | After=lme-elasticsearch.service 5 | Requires=lme-elasticsearch.service 6 | PartOf=lme.service 7 | 8 | [Service] 9 | Restart=always 10 | LimitNOFILE=655360 11 | Environment=ANSIBLE_VAULT_PASSWORD_FILE=/etc/lme/pass.sh 12 | TimeoutStartSec=5400 13 | 14 | [Install] 15 | WantedBy=default.target lme.service 16 | 17 | [Container] 18 | ContainerName=lme-wazuh-manager 19 | Environment=INDEXER_URL=https://lme-elasticsearch:9200 FILEBEAT_SSL_VERIFICATION_MODE=full SSL_CERTIFICATE_AUTHORITIES=/etc/wazuh-manager/certs/ca/ca.crt SSL_CERTIFICATE=/etc/wazuh-manager/certs/wazuh-manager/wazuh-manager.crt SSL_KEY=/etc/wazuh-manager/certs/wazuh-manager/wazuh-manager.key 20 | EnvironmentFile=/opt/lme/lme-environment.env 21 | Secret=wazuh,type=env,target=WAZUH_PASSWORD 22 | Secret=wazuh_api,type=env,target=API_PASSWORD 23 | Secret=elastic,type=env,target=INDEXER_PASSWORD 24 | HostName=wazuh-manager 25 | Image=localhost/wazuh-manager:LME_LATEST 26 | Network=lme 27 | PodmanArgs=--network-alias lme-wazuh-manager --health-interval=30s --health-timeout=10s --health-retries=5 --health-start-period=120s 28 | PublishPort=1514:1514 29 | PublishPort=1515:1515 30 | PublishPort=514:514/udp 31 | PublishPort=55000:55000 32 | Ulimit=memlock=-1:-1 33 | #Set above, leaving here for posterity, systemctl doesn't allow containers to set ulimits 34 | #Ulimit=nofile=655360:655360 35 | Volume=lme_wazuh_api_configuration:/var/ossec/api/configuration 36 | Volume=lme_wazuh_etc:/var/ossec/etc 37 | Volume=lme_wazuh_logs:/var/ossec/logs 38 | Volume=lme_wazuh_queue:/var/ossec/queue 39 | Volume=lme_wazuh_logs:/var/ossec/logs 40 | Volume=lme_wazuh_var_multigroups:/var/ossec/var/multigroups 41 | Volume=lme_wazuh_integrations:/var/ossec/integrations 42 | Volume=lme_wazuh_active_response:/var/ossec/active-response/bin 43 | Volume=lme_wazuh_agentless:/var/ossec/agentless 44 | 
Volume=lme_wazuh_wodles:/var/ossec/wodles 45 | Volume=lme_filebeat_etc:/etc/filebeat 46 | Volume=lme_filebeat_var:/var/lib/filebeat 47 | Volume=/opt/lme/config/wazuh_cluster/wazuh_manager.conf:/wazuh-config-mount/etc/ossec.conf 48 | Volume=lme_certs:/etc/wazuh-manager/certs:ro 49 | Volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt:ro 50 | UserNS=auto:uidmapping=0:174680:3048,gidmapping=0:174680:3048 51 | HealthCmd=CMD-SHELL curl -k -s -o /dev/null -w "%{http_code}" https://localhost:55000 | grep 401 52 | Notify=healhy 53 | 54 | -------------------------------------------------------------------------------- /quadlet/lme.network: -------------------------------------------------------------------------------- 1 | # lme.network 2 | [Unit] 3 | PartOf=lme.service 4 | After=lme.service 5 | 6 | [Service] 7 | 8 | [Network] 9 | Driver=bridge 10 | Gateway=10.89.4.1 11 | IPAMDriver=host-local 12 | NetworkName=lme 13 | Subnet=10.89.4.0/24 14 | -------------------------------------------------------------------------------- /quadlet/lme.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LME service orchestrator runs all the service files 3 | 4 | [Install] 5 | WantedBy=default.target 6 | 7 | [Service] 8 | # Exits after it starts the service 9 | Type=oneshot 10 | # Execute dummy program 11 | ExecStart=/bin/true 12 | # This service shall be considered active after start 13 | RemainAfterExit=yes 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /scripts/check_fleet_api.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | get_script_path() { 4 | local source="${BASH_SOURCE[0]}" 5 | while [ -h "$source" ]; do 6 | local dir="$(cd -P "$(dirname "$source")" && pwd)" 7 | source="$(readlink "$source")" 8 | [[ $source != /* ]] && source="$dir/$source" 9 | done 10 | echo "$(cd -P "$(dirname "$source")" && pwd)" 11 | } 12 | 13 | SCRIPT_DIR="$(get_script_path)" 14 | 15 | HEADERS=( 16 | -H "kbn-version: 8.15.3" 17 | -H "kbn-xsrf: kibana" 18 | -H 'Content-Type: application/json' 19 | ) 20 | 21 | # Function to check if Fleet API is ready 22 | check_fleet_ready() { 23 | local response 24 | response=$(curl -k -s --user "elastic:${elastic}" \ 25 | "${HEADERS[@]}" \ 26 | "${LOCAL_KBN_URL}/api/fleet/settings") 27 | 28 | if [[ "$response" == *"Kibana server is not ready yet"* ]]; then 29 | return 1 30 | else 31 | return 0 32 | fi 33 | } 34 | 35 | # Wait for Fleet API to be ready 36 | wait_for_fleet() { 37 | echo "Waiting for Fleet API to be ready..." 38 | max_attempts=60 39 | attempt=1 40 | while ! check_fleet_ready; do 41 | if [ $attempt -ge $max_attempts ]; then 42 | echo "Fleet API did not become ready after $max_attempts attempts. Exiting." 43 | exit 1 44 | fi 45 | echo "Attempt $attempt: Fleet API not ready. Waiting 10 seconds..." 46 | sleep 10 47 | attempt=$((attempt + 1)) 48 | done 49 | echo "Fleet API is ready. Proceeding with configuration..." 50 | } 51 | 52 | #main: 53 | source /opt/lme/lme-environment.env 54 | 55 | # Set the secrets values and export them (source instead of execute) 56 | set -a 57 | . 
$SCRIPT_DIR/extract_secrets.sh -q 58 | 59 | wait_for_fleet -------------------------------------------------------------------------------- /scripts/check_password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_password() { 4 | local password="$1" 5 | local min_length=12 6 | 7 | # Check password length 8 | if [ ${#password} -lt $min_length ]; then 9 | echo "Input is too short. It should be at least $min_length characters long." 10 | return 1 11 | fi 12 | 13 | # Generate SHA-1 hash of the password 14 | hash=$(echo -n "$password" | openssl sha1 | awk '{print $2}') 15 | prefix="${hash:0:5}" 16 | suffix="${hash:5}" 17 | 18 | # Check against HIBP API 19 | response=$(curl -s "https://api.pwnedpasswords.com/range/$prefix") 20 | 21 | if echo "$response" | grep -qi "$suffix"; then 22 | echo "This input has been found in known data breaches. Please choose a different one." 23 | return 1 24 | fi 25 | 26 | # If we've made it here, the input meets the requirements 27 | echo "Input meets the complexity requirements and hasn't been found in known data breaches." 28 | return 0 29 | } 30 | 31 | # Main script 32 | if [ -n "$CHECKME" ]; then 33 | # Use input from environment variable 34 | check_password "$CHECKME" 35 | elif [ $# -eq 1 ]; then 36 | # Use input from command-line argument 37 | check_password "$1" 38 | else 39 | echo "Usage: CHECKME=your_input $0" 40 | echo " or: $0 your_input" 41 | exit 1 42 | fi -------------------------------------------------------------------------------- /scripts/download.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | source .env 3 | USER=elastic 4 | PASSWORD=${ELASTIC_PASSWORD_ESCAPED} 5 | PROTO=https 6 | REMOTE=10.20.0.174:9200 7 | 8 | #TODO: make this a cli flag 9 | #------------ edit this----------- 10 | #assumes files are INDEX_mapping.json + INDEX.json 11 | # mapping + logs 12 | DIR=/data/logs/ 13 | INDICES=$(ls ${DIR} | cut -f -3 -d '.' 
| grep -v "_mapping"| grep -v "template"| sort | uniq) 14 | #INDICES=$("elastalert_status" "elastalert_status_error" "elastalert_status_past" "elastalert_status_silence" "elastalert_status") 15 | 16 | 17 | #------------ edit this ----------- 18 | 19 | echo -e "\n\ncheck \`podman logs -f CONTAINER_NAME\` for verbose output\n\n" 20 | echo -e "\n--Uploading: --\n" 21 | for x in ${INDICES}; 22 | do 23 | echo "podman runs for $x:" 24 | podman run -it -d -v ${DIR}${x}_mapping.json:/tmp/data.json -e NODE_TLS_REJECT_UNAUTHORIZED=0 --userns="" --network=host elasticdump/elasticsearch-dump --output=/tmp/data.json --input=${PROTO}://${USER}:${PASSWORD}@localhost:9200/${x} --type=mapping 25 | 26 | podman run -v ${DIR}${x}:/tmp/ -e NODE_TLS_REJECT_UNAUTHORIZED=0 --userns="" --network=host --rm -ti elasticdump/elasticsearch-dump --input=http://${REMOTE}/${x} --output=/tmp/${x}.json --limit 5000 27 | echo "" 28 | done 29 | 30 | ## cleanup: 31 | echo "--to cleanup when done:--" 32 | echo "podman ps -a --format \"{{.Image}} {{.Names}}\" | grep -i "elasticdump" | awk \'{print $2}\' | xargs podman rm" 33 | 34 | tot=$(wc -l $(ls ${DIR} | grep -v "_mapping" | xargs -I{} echo ${DIR}{})) 35 | echo -e "\n--Expected Log #:\n $tot--" 36 | 37 | -------------------------------------------------------------------------------- /scripts/extract_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Function to print usage 4 | print_usage() { 5 | echo "Usage: source $0 [-p] [-q]" 6 | echo " -p Print the secret values (use with caution)" 7 | echo " -q Quiet mode (suppress all output)" 8 | } 9 | 10 | # Default behavior: don't print secrets and don't suppress output 11 | PRINT_SECRETS=false 12 | QUIET_MODE=false 13 | 14 | # Parse command line options 15 | while getopts ":pq" opt; do 16 | case ${opt} in 17 | p ) 18 | PRINT_SECRETS=true 19 | ;; 20 | q ) 21 | QUIET_MODE=true 22 | ;; 23 | \? ) 24 | print_usage 25 | return 1 26 | ;; 27 | esac 28 | done 29 | 30 | # Function to echo only if not in quiet mode 31 | echo_if_not_quiet() { 32 | if ! $QUIET_MODE; then 33 | echo "$@" 34 | fi 35 | } 36 | 37 | # Source the profile to ensure podman is available in the current shell 38 | if [ -f ~/.profile ]; then 39 | . ~/.profile 40 | else 41 | echo "~/.profile not found. Make sure podman is in your PATH." 42 | return 1 43 | fi 44 | 45 | # Find the full path to podman 46 | PODMAN_PATH=$(which podman) 47 | 48 | if [ -z "$PODMAN_PATH" ]; then 49 | echo "podman command not found. Please ensure it's installed and in your PATH." 50 | return 1 51 | fi 52 | 53 | echo_if_not_quiet "Found podman at: $PODMAN_PATH" 54 | 55 | # Run the podman secret ls command with sudo and capture the output 56 | output=$(sudo "$PODMAN_PATH" secret ls) 57 | 58 | # Check if the command was successful 59 | if [ $? -ne 0 ]; then 60 | echo "Failed to run 'sudo $PODMAN_PATH secret ls'. Check your permissions and podman installation." 
61 | return 1 62 | fi 63 | 64 | # Process the output and create a string of export commands 65 | export_commands="" 66 | while IFS= read -r line; do 67 | if [[ $line != ID* ]]; then # Skip the header line 68 | # Parse the line into variables 69 | read -r id name driver created updated <<< "$line" 70 | 71 | # Use the name as-is for the variable name 72 | var_name=$name 73 | 74 | # Get the real password using ansible-vault 75 | secret_value=$(sudo -i ansible-vault view /etc/lme/vault/$id) 76 | 77 | # Add export command to the string 78 | export_commands+="export $var_name='$secret_value'; " 79 | 80 | if $PRINT_SECRETS; then 81 | echo "Exported $var_name: $secret_value" 82 | elif ! $QUIET_MODE; then 83 | echo "Exported $var_name" 84 | fi 85 | fi 86 | done <<< "$output" 87 | 88 | # Execute the export commands 89 | eval "$export_commands" 90 | 91 | if $PRINT_SECRETS; then 92 | echo "Exported variables with values:" 93 | env | grep -E "^(wazuh|wazuh_api|kibana_system|elastic)=" 94 | elif ! $QUIET_MODE; then 95 | echo "Exported variables (values hidden):" 96 | env | grep -E "^(wazuh|wazuh_api|kibana_system|elastic)=" | cut -d= -f1 97 | fi 98 | 99 | if ! $QUIET_MODE; then 100 | echo "" 101 | echo "To use these variables in your current shell, source this script instead of executing it:" 102 | echo "source $0 # to export variables without printing values" 103 | echo "source $0 -p # to export variables and print values (use with caution)" 104 | echo "source $0 -q # to export variables without any output" 105 | fi -------------------------------------------------------------------------------- /scripts/gen_cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | source .env 3 | 4 | #set via cli arg 5 | CERT_DIR=${1:-caddy/certs} 6 | 7 | ## generate CA: 8 | echo "creating CA CRT" 9 | export CERT_STRING='/C=US/ST=DC/L=Washington/O=CISA' 10 | openssl genrsa -out ${CERT_DIR}/root-ca.key 4096 11 | openssl req -new -key ${CERT_DIR}/root-ca.key -out ${CERT_DIR}/root-ca.csr -sha256 -subj "$CERT_STRING/CN=LME" 12 | openssl x509 -req -days 3650 -in ${CERT_DIR}/root-ca.csr -signkey ${CERT_DIR}/root-ca.key -sha256 -out ${CERT_DIR}/root-ca.crt 13 | 14 | echo "creating caddy CRT" 15 | openssl genrsa -out ${CERT_DIR}/caddy.key 4096 16 | openssl req -new -key ${CERT_DIR}/caddy.key -out ${CERT_DIR}/caddy.csr -sha256 -subj "$CERT_STRING/CN=caddy" 17 | 18 | #set openssl so that this cert can only perform server auth and cannot sign certs 19 | { 20 | echo "[server]" 21 | echo "authorityKeyIdentifier=keyid,issuer" 22 | echo "basicConstraints = critical,CA:FALSE" 23 | echo "extendedKeyUsage=serverAuth,clientAuth" 24 | echo "keyUsage = critical, digitalSignature, keyEncipherment" 25 | #echo "subjectAltName = DNS:elasticsearch, IP:127.0.0.1" 26 | echo "subjectAltName = DNS:ls1, IP:127.0.0.1" 27 | echo "subjectKeyIdentifier=hash" 28 | } >${CERT_DIR}/caddy.cnf 29 | openssl x509 -req -days 3650 -in ${CERT_DIR}/caddy.csr -sha256 -CA ${CERT_DIR}/root-ca.crt -CAkey ${CERT_DIR}/root-ca.key -CAcreateserial -out ${CERT_DIR}/caddy.crt -extfile ${CERT_DIR}/caddy.cnf -extensions server 30 | -------------------------------------------------------------------------------- /scripts/install_sysmon.ps1: -------------------------------------------------------------------------------- 1 | # Curl and unzip sysmon off the windows sysinternals page 2 | curl https://download.sysinternals.com/files/Sysmon.zip -OutFile sysmon.zip 3 | Expand-Archive sysmon.zip 4 | # Curl and unzip the 
swift on config xml 5 | curl https://github.com/SwiftOnSecurity/sysmon-config/archive/refs/heads/master.zip -OutFile sysmon-config.zip 6 | Expand-Archive sysmon-config.zip 7 | # install sysmon 8 | .\sysmon\sysmon -accepteula -i .\sysmon-config\sysmon-config-master\sysmonconfig-export.xml -------------------------------------------------------------------------------- /scripts/link_latest_podman_quadlet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #TODO: may need to add a nix-store --gc, to clean up extra files that exist 3 | # Find the latest podman version in the Nix store 4 | latest_podman=$(find /nix/store -maxdepth 1 -name '*-podman-*' | 5 | sed -n 's/.*-podman-\([0-9.]*\)$/\1/p' | 6 | sort -V | 7 | tail -n1) 8 | 9 | if [ -n "$latest_podman" ]; then 10 | # Find the full path of the latest version 11 | podman_path=$(find /nix/store -maxdepth 1 -name "*-podman-${latest_podman}" | tail -n1) 12 | 13 | # Assign the result to a variable 14 | LATEST_PODMAN_PATH="$podman_path" 15 | 16 | echo "Latest Podman version found: $latest_podman" 17 | echo "Path: $LATEST_PODMAN_PATH" 18 | else 19 | echo "No Podman installation found in the Nix store." 20 | fi 21 | 22 | 23 | sudo ln -sf "$LATEST_PODMAN_PATH/lib/systemd/system-generators/podman-system-generator" /usr/lib/systemd/system-generators/podman-system-generator 24 | sudo ln -sf "$LATEST_PODMAN_PATH/lib/systemd/user-generators/podman-user-generator" /usr/lib/systemd/user-generators/ 25 | sudo ln -sf -t /usr/lib/systemd/system/ /nix/store/$LATEST_PODMAN_PATH/lib/systemd/system/* 26 | sudo ln -sf -t /usr/lib/systemd/user/ /nix/store/$LATEST_PODMAN_PATH/lib/systemd/user/* 27 | 28 | echo "Linked the files in systemd" 29 | 30 | -------------------------------------------------------------------------------- /scripts/set-fleet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | get_script_path() { 4 | local source="${BASH_SOURCE[0]}" 5 | while [ -h "$source" ]; do 6 | local dir="$(cd -P "$(dirname "$source")" && pwd)" 7 | source="$(readlink "$source")" 8 | [[ $source != /* ]] && source="$dir/$source" 9 | done 10 | echo "$(cd -P "$(dirname "$source")" && pwd)" 11 | } 12 | 13 | SCRIPT_DIR="$(get_script_path)" 14 | 15 | HEADERS=( 16 | -H "kbn-version: 8.15.3" 17 | -H "kbn-xsrf: kibana" 18 | -H 'Content-Type: application/json' 19 | ) 20 | 21 | # Function to check if Fleet API is ready 22 | check_fleet_ready() { 23 | local response 24 | response=$(curl -kL -s --user "elastic:${elastic}" \ 25 | "${HEADERS[@]}" \ 26 | "${LOCAL_KBN_URL}/api/fleet/settings") 27 | 28 | if [[ "$response" == *"Kibana server is not ready yet"* ]]; then 29 | return 1 30 | else 31 | return 0 32 | fi 33 | } 34 | 35 | # Wait for Fleet API to be ready 36 | wait_for_fleet() { 37 | echo "Waiting for Fleet API to be ready..." 38 | max_attempts=60 39 | attempt=1 40 | while ! check_fleet_ready; do 41 | if [ $attempt -ge $max_attempts ]; then 42 | echo "Fleet API did not become ready after $max_attempts attempts. Exiting." 43 | exit 1 44 | fi 45 | echo "Attempt $attempt: Fleet API not ready. Waiting 10 seconds..." 46 | sleep 10 47 | attempt=$((attempt + 1)) 48 | done 49 | echo "Fleet API is ready. Proceeding with configuration..." 
50 | } 51 | 52 | set_fleet_values() { 53 | fingerprint=$(/nix/var/nix/profiles/default/bin/podman exec -w /usr/share/elasticsearch/config/certs/ca lme-elasticsearch cat ca.crt | openssl x509 -nout -fingerprint -sha256 | cut -d "=" -f 2| tr -d : | head -n1) 54 | fleet_api_response=$(printf '{"fleet_server_hosts": ["%s"]}' "https://${IPVAR}:${FLEET_PORT}" | curl -kL -v --user "elastic:${elastic}" -XPUT "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/settings" -d @-) 55 | 56 | echo "Fleet API Response:" 57 | echo "$fleet_api_response" 58 | 59 | printf '{"hosts": ["%s"]}' "https://${IPVAR}:9200" | curl -kL --silent --user "elastic:${elastic}" -XPUT "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/outputs/fleet-default-output" -d @- | jq 60 | printf '{"ca_trusted_fingerprint": "%s"}' "${fingerprint}" | curl -kL --silent --user "elastic:${elastic}" -XPUT "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/outputs/fleet-default-output" -d @- | jq 61 | printf '{"config_yaml": "%s"}' "ssl.verification_mode: certificate" | curl -kL --silent --user "elastic:${elastic}" -XPUT "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/outputs/fleet-default-output" -d @- | jq 62 | policy_id=$(printf '{"name": "%s", "description": "%s", "namespace": "%s", "monitoring_enabled": ["logs","metrics"], "inactivity_timeout": 1209600}' "Endpoint Policy" "" "default" | curl -k --silent --user "elastic:${elastic}" -XPOST "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/agent_policies?sys_monitoring=true" -d @- | jq -r '.item.id') 63 | echo "Policy ID: ${policy_id}" 64 | pkg_version=$(curl -kL --user "elastic:${elastic}" -XGET "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/epm/packages/endpoint" -d : | jq -r '.item.version') 65 | printf "{\"name\": \"%s\", \"description\": \"%s\", \"namespace\": \"%s\", \"policy_id\": \"%s\", \"enabled\": %s, \"inputs\": [{\"enabled\": true, \"streams\": [], \"type\": \"ENDPOINT_INTEGRATION_CONFIG\", \"config\": {\"_config\": {\"value\": {\"type\": \"endpoint\", \"endpointConfig\": {\"preset\": \"EDRComplete\"}}}}}], \"package\": {\"name\": \"endpoint\", \"title\": \"Elastic Defend\", \"version\": \"${pkg_version}\"}}" "Elastic Defend" "" "default" "${policy_id}" "true" | curl -k --silent --user "elastic:${elastic}" -XPOST "${HEADERS[@]}" "${LOCAL_KBN_URL}/api/fleet/package_policies" -d @- | jq 66 | } 67 | 68 | #main: 69 | source /opt/lme/lme-environment.env 70 | 71 | # Set the secrets values and export them (source instead of execute) 72 | set -a 73 | . 
$SCRIPT_DIR/extract_secrets.sh -q 74 | 75 | wait_for_fleet 76 | 77 | set_fleet_values -------------------------------------------------------------------------------- /scripts/set_sysctl_limits.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the script is run as root 4 | if [[ $EUID -ne 0 ]]; then 5 | echo "This script must be run as root" 6 | exit 1 7 | fi 8 | 9 | # Check if NON_ROOT_USER is set 10 | if [ -z ${NON_ROOT_USER+x} ]; then 11 | echo "var NON_ROOT_USER is unset" 12 | exit 1 13 | else 14 | echo "NON_ROOT_USER='$NON_ROOT_USER'" 15 | fi 16 | 17 | # Function to update or add a sysctl setting 18 | update_sysctl() { 19 | local key=$1 20 | local value=$2 21 | local file="/etc/sysctl.conf" 22 | 23 | if grep -qE "^$key\s*=" "$file"; then 24 | sed -i "s/^$key\s*=.*/$key = $value/" "$file" 25 | echo "Updated $key in $file" 26 | elif grep -qE "^#\s*$key\s*=" "$file"; then 27 | sed -i "s/^#\s*$key\s*=.*/$key = $value/" "$file" 28 | echo "Uncommented and updated $key in $file" 29 | else 30 | echo "$key = $value" >> "$file" 31 | echo "Added $key to $file" 32 | fi 33 | } 34 | 35 | # Update sysctl settings 36 | update_sysctl "vm.max_map_count" "262144" 37 | update_sysctl "net.core.rmem_max" "7500000" 38 | update_sysctl "net.core.wmem_max" "7500000" 39 | 40 | # Apply sysctl changes 41 | sysctl -p 42 | 43 | # Update limits.conf 44 | limits_file="/etc/security/limits.conf" 45 | limits_entry="$NON_ROOT_USER soft nofile 655360 46 | $NON_ROOT_USER hard nofile 655360" 47 | 48 | if grep -qE "^$NON_ROOT_USER\s+soft\s+nofile" "$limits_file"; then 49 | echo "$limits_file already configured for $NON_ROOT_USER. No changes needed." 50 | else 51 | echo "$limits_entry" >> "$limits_file" 52 | echo "Updated $limits_file for $NON_ROOT_USER" 53 | fi 54 | 55 | # Display current values 56 | echo "Current sysctl values:" 57 | sysctl net.ipv4.ip_unprivileged_port_start 58 | sysctl vm.max_map_count 59 | sysctl net.core.rmem_max 60 | sysctl net.core.wmem_max 61 | 62 | echo "Script execution completed." 63 | -------------------------------------------------------------------------------- /scripts/set_vault_key_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | read -s -p "ANSIBLE_VAULT_PASSWORD:" LME_ANSIBLE_VAULT_PASS 3 | export LME_ANSIBLE_VAULT_PASS=$LME_ANSIBLE_VAULT_PASS 4 | 5 | #set password file ansible-vault variable 6 | export ANSIBLE_VAULT_PASSWORD_FILE=/opt/lme/config/vault-pass.sh 7 | -------------------------------------------------------------------------------- /scripts/upgrade/README.md: -------------------------------------------------------------------------------- 1 | # Upgrading from 1x to 2x 2 | Note: You don't have to upgrade to 2x, but this is the guide to do so. You can just do a fresh install of 2x. You will want to skip to the uninstall section (Section 4) to clear your system of the old version if you are not upgrading. 3 | 1. Checkout the latest version of the LME repository to your home directory 4 | ```bash 5 | cd ~ 6 | git clone https://github.com/cisagov/LME.git 7 | ``` 8 | 1. Export indices: 9 | 10 | Note: *This may take some time witout feedback. Make sure it finishes successfully* 11 | 12 | A successful completion looks like this: 13 | ```bash 14 | Data and mappings export completed. 
Backup stored in: /lme_backup 15 | Files created: 16 | - /lme_backup/winlogbeat_data.json.gz 17 | - /lme_backup/winlogbeat_mappings.json.gz 18 | ``` 19 | Run this command to export the indices (this may take some time without feedback): 20 | ```bash 21 | cd ~/LME/scripts/upgrade 22 | sudo ./export_1x.sh 23 | ``` 24 | 1. Either export the dashboards or use the existing ones 25 | - If you don't have custom dashboards, you can use the path to the existing ones in the following steps 26 | ```bash 27 | /opt/lme/Chapter 4 Files/dashboards/ or 28 | /opt/lme-old/Chapter 4 Files/dashboards/ 29 | ``` 30 | - If you have custom dashboards, you will need to export them and use that path: 31 | ```bash 32 | # Export all of the dashboards, it is the last option 33 | cd ~/LME/scripts/upgrade/ 34 | pip install -r requirements.txt 35 | export_dashboards.py -u elastic -p yourpassword 36 | ``` 37 | - Your path to use for the importer will be: 38 | ```bash 39 | /yourhomedirectory/LME/scripts/upgrade/exported/ 40 | ``` 41 | 1. Uninstall old LME version 42 | ```bash 43 | sudo su 44 | cd "/opt/lme/Chapter 3 Files/" 45 | ./deploy.sh uninstall 46 | 47 | # Go back to your user 48 | exit 49 | 50 | # If you are using docker for more than lme (You want to keep docker) 51 | sudo docker volume rm lme_esdata 52 | sudo docker volume rm lme_logstashdata 53 | 54 | # If you are only using docker for lme 55 | # Remove existing volumes 56 | cd ~/LME/scripts/upgrade 57 | 58 | sudo su # Become root in the right directory 59 | ./remove_volumes.sh 60 | # Uninstall Docker 61 | ./uninstall_docker.sh 62 | 63 | # Rename the directory to make room for the new install 64 | mv /opt/lme /opt/lme-old 65 | exit # Go back to regular user 66 | ``` 67 | 1. Install LME version 2x 68 | ```bash 69 | #***** Make sure you are running as normal user *****# 70 | sudo apt-get update && sudo apt-get -y install ansible 71 | 72 | # Copy the environment file 73 | cp ~/LME/config/example.env ~/LME/config/lme-environment.env 74 | 75 | # Edit the lme-environment.env and change all the passwords 76 | # vim ~/LME/config/lme-environment.env 77 | 78 | # Change to the script directory 79 | cd ~/LME/ansible/ 80 | 81 | ansible-playbook install_lme_local.yml 82 | 83 | # Load podman into your environment 84 | . ~/.profile 85 | 86 | # Have the full paths of the winlogbeat files that you exported earlier ready 87 | # /lme_backup/winlogbeat_data.json.gz 88 | # /lme_backup/winlogbeat_mappings.json.gz 89 | 90 | cd ../scripts/ 91 | 92 | # This will extract the secrets from the environment file and show them to you. Save these passwords. 93 | . extract_secrets.sh -p 94 | 95 | # This will import the winlogbeat data and mappings use the elastic password from above 96 | ./upgrade/import_1x.sh 97 | 98 | # Use the path from above dashboard export or original dashboards 99 | # Use the elastic password from above. It is the new password for elastic 100 | sudo ./upgrade/import_dashboards.sh -d /opt/lme-old/Chapter\ 4\ Files/dashboards/ 101 | ``` 102 | 103 | You will now want to do the rest of the installation instructions in the README at the root of the repo. 
104 | Start with the section after running `ansible-playbook install_lme_local.yml` 105 | -------------------------------------------------------------------------------- /scripts/upgrade/export_1x.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | LME_PATH="/opt/lme" 6 | ES_PORT="9200" 7 | ES_PROTOCOL="https" 8 | 9 | # Function to get the host IP address 10 | get_host_ip() { 11 | ip route get 1 | awk '{print $7;exit}' 12 | } 13 | 14 | ES_HOST=$(get_host_ip) 15 | 16 | # Function to find the drive with the most free space 17 | find_max_space_drive() { 18 | df -h | awk ' 19 | BEGIN { max=0; maxdir="/" } 20 | { 21 | if (NR>1 && $1 !~ /^tmpfs/ && $1 !~ /^efivarfs/ && $1 !~ /^\/dev\/loop/) { 22 | gsub(/[A-Za-z]/, "", $4) 23 | if ($4+0 > max+0) { 24 | max = $4 25 | maxdir = $6 26 | } 27 | } 28 | } 29 | END { print maxdir } 30 | ' 31 | } 32 | 33 | # Function to clean up path (remove double slashes) 34 | clean_path() { 35 | echo "$1" | sed 's#//*#/#g' 36 | } 37 | 38 | # Function to check Elasticsearch connection and version 39 | check_es_connection() { 40 | local response 41 | local http_code 42 | response=$(curl -s -k -u "${ES_USER}:${ES_PASS}" -w "\n%{http_code}" "${ES_PROTOCOL}://${ES_HOST}:${ES_PORT}") 43 | http_code=$(echo "$response" | tail -n1) 44 | body=$(echo "$response" | sed '$d') 45 | 46 | if [ "$http_code" = "200" ]; then 47 | es_version=$(echo "$body" | jq -r '.version.number') 48 | if [[ "${es_version}" =~ ^8\. ]]; then 49 | echo "Successfully connected to Elasticsearch version ${es_version}" 50 | return 0 51 | else 52 | echo "Unsupported Elasticsearch version: ${es_version}. This script supports Elasticsearch 8.x." 53 | return 1 54 | fi 55 | elif [ "$http_code" = "401" ]; then 56 | echo "Authentication failed. Please check your username and password." 57 | return 1 58 | else 59 | echo "Failed to connect to Elasticsearch. HTTP status code: ${http_code}" 60 | return 1 61 | fi 62 | } 63 | 64 | # Function to export data and mappings using Docker and elasticdump 65 | export_data_and_mappings() { 66 | local output_dir="$1" 67 | 68 | echo "Exporting winlogbeat-* indices data..." 69 | docker run --rm -v "${output_dir}:${output_dir}" \ 70 | --network host \ 71 | -e NODE_TLS_REJECT_UNAUTHORIZED=0 \ 72 | elasticdump/elasticsearch-dump \ 73 | --input=${ES_PROTOCOL}://${ES_USER}:${ES_PASS}@${ES_HOST}:${ES_PORT}/winlogbeat-* \ 74 | --output=$ \ 75 | --type=data \ 76 | --headers='{"Content-Type": "application/json"}' \ 77 | --sslVerification=false | gzip > "${output_dir}/winlogbeat_data.json.gz" 78 | 79 | echo "Exporting winlogbeat-* indices mappings..." 
80 | docker run --rm -v "${output_dir}:${output_dir}" \ 81 | --network host \ 82 | -e NODE_TLS_REJECT_UNAUTHORIZED=0 \ 83 | elasticdump/elasticsearch-dump \ 84 | --input=${ES_PROTOCOL}://${ES_USER}:${ES_PASS}@${ES_HOST}:${ES_PORT}/winlogbeat-* \ 85 | --output=$ \ 86 | --type=mapping \ 87 | --headers='{"Content-Type": "application/json"}' \ 88 | --sslVerification=false | gzip > "${output_dir}/winlogbeat_mappings.json.gz" 89 | } 90 | 91 | # Function to prompt for password securely 92 | prompt_password() { 93 | local prompt="$1" 94 | local password 95 | while IFS= read -p "$prompt" -r -s -n 1 char 96 | do 97 | if [[ $char == $'\0' ]]; then 98 | break 99 | fi 100 | prompt='*' 101 | password+="$char" 102 | done 103 | echo "$password" 104 | } 105 | 106 | # Main script 107 | echo "LME Data Export Script for Elasticsearch 8.x" 108 | echo "============================================" 109 | 110 | echo "Using host IP: ${ES_HOST}" 111 | 112 | # Check if Docker is installed and running 113 | if ! command -v docker &> /dev/null; then 114 | echo "Error: Docker is not installed. Please install Docker to proceed." 115 | exit 1 116 | fi 117 | 118 | if ! docker info &> /dev/null; then 119 | echo "Error: Docker daemon is not running. Please start Docker to proceed." 120 | exit 1 121 | fi 122 | 123 | # Prompt for Elasticsearch credentials and verify connection 124 | while true; do 125 | read -p "Enter Elasticsearch username: " ES_USER 126 | ES_PASS=$(prompt_password "Enter Elasticsearch password: ") 127 | echo # Move to a new line after password input 128 | 129 | if check_es_connection; then 130 | break 131 | else 132 | echo "Would you like to try again? (y/n)" 133 | read -r retry 134 | if [[ ! $retry =~ ^[Yy]$ ]]; then 135 | echo "Exiting script." 136 | exit 1 137 | fi 138 | fi 139 | done 140 | 141 | # Determine backup location 142 | echo "Choose backup directory:" 143 | echo "1. Specify a directory" 144 | echo "2. Automatically find directory with most space" 145 | read -p "Enter your choice (1 or 2): " dir_choice 146 | 147 | case $dir_choice in 148 | 1) 149 | read -p "Enter the backup directory path: " BACKUP_DIR 150 | ;; 151 | 2) 152 | max_space_dir=$(find_max_space_drive) 153 | BACKUP_DIR=$(clean_path "${max_space_dir}/lme_backup") 154 | echo "Directory with most free space: $BACKUP_DIR" 155 | read -p "Is this okay? (y/n): " confirm 156 | if [[ $confirm != [Yy]* ]]; then 157 | echo "Please run the script again and choose option 1 to specify a directory." 158 | exit 1 159 | fi 160 | ;; 161 | *) 162 | echo "Invalid choice. Exiting." 163 | exit 1 164 | ;; 165 | esac 166 | 167 | # Clean up the final BACKUP_DIR path 168 | BACKUP_DIR=$(clean_path "$BACKUP_DIR") 169 | 170 | # Create backup directory if it doesn't exist 171 | mkdir -p "${BACKUP_DIR}" 172 | 173 | # Export data and mappings 174 | export_data_and_mappings "${BACKUP_DIR}" 175 | 176 | echo "Data and mappings export completed. 
Backup stored in: ${BACKUP_DIR}" 177 | echo "Files created:" 178 | echo " - ${BACKUP_DIR}/winlogbeat_data.json.gz" 179 | echo " - ${BACKUP_DIR}/winlogbeat_mappings.json.gz" -------------------------------------------------------------------------------- /scripts/upgrade/fix_dashboard_titles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Function to fix dashboard title 4 | fix_dashboard_title() { 5 | local file="$1" 6 | local temp_file="${file}.tmp" 7 | 8 | # Process the file line by line 9 | while IFS= read -r line || [[ -n "$line" ]]; do 10 | if echo "$line" | jq -e 'select(.type == "dashboard")' > /dev/null 2>&1; then 11 | # It's a dashboard object, update the title 12 | updated_line=$(echo "$line" | jq -c ' 13 | if .attributes.title and (.attributes.title | startswith("1x-") | not) then 14 | .attributes.title = "1x-" + .attributes.title 15 | else 16 | . 17 | end 18 | ') 19 | echo "$updated_line" >> "$temp_file" 20 | else 21 | # Not a dashboard object, keep the line as is 22 | echo "$line" >> "$temp_file" 23 | fi 24 | done < "$file" 25 | 26 | # Replace the original file with the updated one 27 | mv "$temp_file" "$file" 28 | echo "Updated $file" 29 | } 30 | 31 | # Check if jq is installed 32 | if ! command -v jq &> /dev/null; then 33 | echo "Error: jq is not installed. Please install jq to run this script." 34 | exit 1 35 | fi 36 | 37 | # Check if a directory was provided 38 | if [ $# -eq 0 ]; then 39 | echo "Error: No directory specified" 40 | echo "Usage: $0 " 41 | exit 1 42 | fi 43 | 44 | DASHBOARDS_DIR="$1" 45 | 46 | # Check if the provided directory exists 47 | if [ ! -d "$DASHBOARDS_DIR" ]; then 48 | echo "Error: Directory not found: $DASHBOARDS_DIR" 49 | exit 1 50 | fi 51 | 52 | # Process all .ndjson files in the specified directory 53 | echo "Processing .ndjson files in $DASHBOARDS_DIR" 54 | for file in "$DASHBOARDS_DIR"/*.ndjson; do 55 | if [[ -f "$file" ]]; then 56 | fix_dashboard_title "$file" 57 | fi 58 | done 59 | 60 | echo "All .ndjson files have been processed." -------------------------------------------------------------------------------- /scripts/upgrade/import_dashboards.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the directory of the current script 4 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 5 | 6 | ENV_FILE="/opt/lme/lme-environment" 7 | 8 | # Function to display usage information 9 | usage() { 10 | echo "Usage: $0 -d DIRECTORY [OPTIONS]" 11 | echo "Options:" 12 | echo " -d, --directory PATH Path to the dashboards directory (required)" 13 | echo " -u, --user USERNAME Elasticsearch username (default: elastic)" 14 | echo " -h, --help Display this help message" 15 | echo "Note: The script will prompt for the password if ELASTIC_PASSWORD is not set." 
16 | exit 1 17 | } 18 | 19 | # Function to read password securely 20 | read_password() { 21 | 22 | if [ -t 0 ]; then 23 | read -s -p "Enter Elasticsearch password: " PASSWORD 24 | echo 25 | else 26 | read PASSWORD 27 | fi 28 | } 29 | 30 | # Initialize variables 31 | USER="elastic" 32 | PASSWORD="" 33 | DASHBOARDS_DIR="" 34 | 35 | # Parse command line arguments 36 | while [[ $# -gt 0 ]]; do 37 | key="$1" 38 | case $key in 39 | -u|--user) 40 | USER="$2" 41 | shift 2 42 | ;; 43 | -d|--directory) 44 | DASHBOARDS_DIR="$2" 45 | shift 2 46 | ;; 47 | -h|--help) 48 | usage 49 | ;; 50 | *) 51 | echo "Unknown option: $1" 52 | usage 53 | ;; 54 | esac 55 | done 56 | 57 | # Check if dashboards directory is provided 58 | if [ -z "$DASHBOARDS_DIR" ]; then 59 | echo "Error: Dashboards directory (-d) is required." 60 | usage 61 | fi 62 | 63 | 64 | # Check for password 65 | if [ -z "$ELASTIC_PASSWORD" ]; then 66 | echo "ELASTIC_PASSWORD is not set. Please enter the password." 67 | read_password 68 | else 69 | echo "Using password from ELASTIC_PASSWORD environment variable." 70 | PASSWORD="$ELASTIC_PASSWORD" 71 | fi 72 | 73 | # Check if the dashboards directory exists 74 | if [ ! -d "$DASHBOARDS_DIR" ]; then 75 | echo "Error: Dashboards directory not found: $DASHBOARDS_DIR" 76 | exit 1 77 | fi 78 | 79 | # Convert DASHBOARDS_DIR to absolute path 80 | DASHBOARDS_DIR=$(realpath "$DASHBOARDS_DIR") 81 | 82 | # Check if fix_dashboard_titles.sh exists in the same directory as this script 83 | FIX_SCRIPT="${SCRIPT_DIR}/fix_dashboard_titles.sh" 84 | if [ ! -f "$FIX_SCRIPT" ]; then 85 | echo "Error: fix_dashboard_titles.sh not found in the script directory: $SCRIPT_DIR" 86 | exit 1 87 | fi 88 | 89 | # Make fix_dashboard_titles.sh executable 90 | chmod +x "$FIX_SCRIPT" 91 | 92 | # Run fix_dashboard_titles.sh with the DASHBOARDS_DIR 93 | echo "Fixing dashboard titles in $DASHBOARDS_DIR..." 94 | "$FIX_SCRIPT" "$DASHBOARDS_DIR" 95 | 96 | # Check the exit status of fix_dashboard_titles.sh 97 | if [ $? -ne 0 ]; then 98 | echo "Error: fix_dashboard_titles.sh failed. Exiting." 99 | exit 1 100 | fi 101 | 102 | # Get list of dashboard files 103 | IFS=$'\n' 104 | DASHBOARDS=($(ls -1 "${DASHBOARDS_DIR}"/*.ndjson)) 105 | 106 | # Check if any dashboard files were found 107 | if [ ${#DASHBOARDS[@]} -eq 0 ]; then 108 | echo "Error: No dashboard files found in $DASHBOARDS_DIR" 109 | exit 1 110 | fi 111 | 112 | echo "Found ${#DASHBOARDS[@]} dashboard files." 113 | 114 | # Upload dashboards 115 | for db in "${DASHBOARDS[@]}"; do 116 | echo "Uploading ${db##*/} dashboard" 117 | curl -X POST -kL --user "${USER}:${PASSWORD}" -H 'kbn-xsrf: true' --form file="@${db}" "https://127.0.0.1/api/saved_objects/_import?overwrite=true" 118 | echo 119 | done 120 | 121 | echo "Dashboard update completed." -------------------------------------------------------------------------------- /scripts/upgrade/remove_volumes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to remove Docker volumes 4 | 5 | # Function to check if Docker is installed 6 | check_docker_installed() { 7 | if ! command -v docker &> /dev/null; then 8 | echo "Error: Docker is not installed on this system." 9 | exit 1 10 | fi 11 | } 12 | 13 | # Function to check if Docker daemon is running 14 | check_docker_running() { 15 | if ! docker info &> /dev/null; then 16 | echo "Error: Docker daemon is not running." 
" 17 | exit 1 18 | fi 19 | } 20 | 21 | # Function to remove all Docker volumes 22 | remove_docker_volumes() { 23 | echo "Removing all Docker volumes..." 24 | 25 | # List all volumes 26 | volumes=$(docker volume ls -q) 27 | 28 | if [ -z "$volumes" ]; then 29 | echo "No Docker volumes found." 30 | else 31 | # Remove each volume 32 | for volume in $volumes; do 33 | echo "Removing volume: $volume" 34 | docker volume rm "$volume" 35 | done 36 | echo "All Docker volumes have been removed." 37 | fi 38 | } 39 | 40 | # Main execution 41 | echo "Docker Volume Removal Script" 42 | echo "============================" 43 | 44 | # Check if Docker is installed 45 | check_docker_installed 46 | 47 | # Check if Docker daemon is running 48 | check_docker_running 49 | 50 | # Check for -y flag 51 | if [[ "$1" == "-y" ]]; then 52 | remove_docker_volumes 53 | else 54 | # Prompt for confirmation 55 | read -p "Are you sure you want to remove all Docker volumes? This action cannot be undone. (y/n): " confirm 56 | 57 | if [[ $confirm == [Yy]* ]]; then 58 | remove_docker_volumes 59 | else 60 | echo "Operation cancelled. No volumes were removed." 61 | fi 62 | fi 63 | 64 | echo "Script completed." -------------------------------------------------------------------------------- /scripts/upgrade/requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | urllib3 -------------------------------------------------------------------------------- /scripts/upgrade/uninstall_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Uninstall Docker script for Ubuntu 22.04 4 | 5 | # Function to safely remove a file 6 | safe_remove() { 7 | if [ -e "$1" ]; then 8 | sudo rm -f "$1" 9 | echo "Removed: $1" 10 | else 11 | echo "File not found, skipping: $1" 12 | fi 13 | } 14 | 15 | # Stop the Docker daemon 16 | sudo systemctl stop docker.service 17 | sudo systemctl stop docker.socket 18 | 19 | # Uninstall Docker Engine, CLI, Containerd, and Docker Compose 20 | sudo apt-get purge -y docker-ce docker-ce-cli containerd.io docker-compose-plugin docker-ce-rootless-extras docker-buildx-plugin 21 | 22 | # Remove Docker directories and files 23 | sudo rm -rf /var/lib/docker 24 | sudo rm -rf /var/lib/containerd 25 | sudo rm -rf /etc/docker 26 | sudo rm -rf ~/.docker 27 | 28 | # Remove the Docker repository 29 | safe_remove /etc/apt/sources.list.d/docker.list 30 | 31 | # Remove the Docker GPG key 32 | safe_remove /etc/apt/keyrings/docker.gpg 33 | safe_remove /usr/share/keyrings/docker-archive-keyring.gpg # Check alternative location 34 | 35 | # Update the package cache 36 | sudo apt-get update 37 | 38 | # Auto-remove any unused dependencies 39 | sudo apt-get autoremove -y 40 | 41 | echo "Docker has been uninstalled from your Ubuntu 22.04 system." 42 | echo "You may need to reboot your system for all changes to take effect." -------------------------------------------------------------------------------- /scripts/upload.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | source .env 3 | USER=elastic 4 | PASSWORD=${ELASTIC_PASSWORD_ESCAPED} 5 | PROTO=https 6 | 7 | #TODO: make this a cli flag 8 | #------------ edit this----------- 9 | #assumes files are INDEX_mapping.json + INDEX.json 10 | # mapping + logs 11 | DIR=/data/alerts/ 12 | INDICES=$(ls ${DIR} | cut -f -3 -d '.'
| grep -v "_mapping"| grep -v "template"| sort | uniq) 13 | 14 | #------------ edit this ----------- 15 | 16 | echo -e "\n\ncheck \`podman logs -f CONTAINER_NAME\` for verbose output\n\n" 17 | echo -e "\n--Uploading: --\n" 18 | for x in ${INDICES}; 19 | do 20 | echo "podman runs for $x:" 21 | podman run -it -d -v ${DIR}${x}_mapping.json:/tmp/data.json -e NODE_TLS_REJECT_UNAUTHORIZED=0 --userns="" --network=host elasticdump/elasticsearch-dump --input=/tmp/data.json --output=${PROTO}://${USER}:${PASSWORD}@localhost:9200/${x} --type=mapping 22 | 23 | podman run -it -d -v ${DIR}${x}.json:/tmp/data.json -e NODE_TLS_REJECT_UNAUTHORIZED=0 --userns="" --network=host elasticdump/elasticsearch-dump --input=/tmp/data.json --output=${PROTO}://${USER}:${PASSWORD}@localhost:9200/${x} --limit=5000 24 | echo "" 25 | done 26 | 27 | ## cleanup: 28 | echo "--to cleanup when done:--" 29 | echo "podman ps -a --format \"{{.Image}} {{.Names}}\" | grep -i "elasticdump" | awk \'{print $2}\' | xargs podman rm" 30 | 31 | tot=$(wc -l $(ls ${DIR} | grep -v "_mapping" | xargs -I{} echo ${DIR}{})) 32 | echo -e "\n--Expected Log #:\n $tot--" 33 | 34 | -------------------------------------------------------------------------------- /scripts/wazuh_rbac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source /root/.profile 3 | podman exec -it lme-wazuh-manager /var/ossec/bin/rbac_control change-password 4 | --------------------------------------------------------------------------------