├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── build.yml │ └── release.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── build-container.sh ├── install ├── README.md └── install.sh ├── integrations ├── ArcSight │ ├── MF_ArcSight_and_Polyverse_ZeroTect_SolutionBrief_v1.1.pdf │ ├── MF_Polyverse_ZeroTect_0.4_ArcSight_CEF_Integration_Guide_2020.pdf │ └── MicroFocus_ArcSight_CEF_FieldMappings.xlsx └── PagerDuty │ ├── README.md │ ├── pagerduty-demo-step-2.png │ ├── pagerduty-demo-step-3.png │ ├── pagerduty-demo-step-4.png │ ├── pagerduty-demo-step-5.png │ ├── pagerduty-demo-step-6.png │ ├── pagerduty-demo-step-7.png │ ├── pagerduty-demo-step-8.png │ └── pagerduty-integration-key.png ├── reference ├── how_zerotect_works.drawio ├── schema.json └── zerotect.toml ├── src ├── analyzer │ ├── close_by_ip_detect.rs │ ├── close_by_register_detect.rs │ ├── eventbuffer.rs │ └── mod.rs ├── common.rs ├── emitter │ ├── console.rs │ ├── filelogger.rs │ ├── mod.rs │ ├── pagerduty.rs │ ├── polycorder.rs │ └── syslogger.rs ├── events.rs ├── formatter │ ├── cef.rs │ ├── error.rs │ ├── json.rs │ ├── mod.rs │ └── text.rs ├── main.rs ├── params.rs ├── raw_event_stream.rs └── system.rs └── usecase ├── datafaulter.c ├── instrfaulter.c ├── invalidopcode.c └── segfault_at_location.c /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **Hardware and software used (Please fill out each section)** 14 | 1. Hardware 15 | 2. Distro 16 | 3. Distro version 17 | 4. 
Zerotect version 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Actual behavior** 23 | A clear and concise description of what actually happened. 24 | 25 | **To Reproduce** 26 | Steps to reproduce the behavior: 27 | 1. Go to '...' 28 | 2. Click on '....' 29 | 3. Scroll down to '....' 30 | 4. See error 31 | 32 | **Screenshots** 33 | If applicable, add screenshots to help explain your problem. 34 | 35 | **Additional context** 36 | Add any other context about the problem here. 37 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. 
4 | 5 | Fixes # (issue) 6 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build Status 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | if: github.event_name == 'push' 9 | steps: 10 | - uses: actions/checkout@v2 11 | 12 | - name: Style (rustfmt) 13 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t ghcr.io/polyverse/rust-dev-env:latest cargo fmt -- --check 14 | 15 | - name: Lint (Clippy) 16 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t ghcr.io/polyverse/rust-dev-env:latest cargo clippy -- -D warnings 17 | 18 | - name: Test 19 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t --privileged ghcr.io/polyverse/rust-dev-env:latest cargo test 20 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | # Publish `v1.2.3` tags as releases. 
6 | tags: 7 | - v* 8 | 9 | jobs: 10 | release: 11 | runs-on: ubuntu-latest 12 | if: github.event_name == 'push' 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Style (rustfmt) 17 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t ghcr.io/polyverse/rust-dev-env:latest cargo fmt -- --check 18 | 19 | - name: Lint (Clippy) 20 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t ghcr.io/polyverse/rust-dev-env:latest cargo clippy -- -D warnings 21 | 22 | - name: Test 23 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t --privileged ghcr.io/polyverse/rust-dev-env:latest cargo test 24 | 25 | - name: Build Release executable 26 | run: docker run -v cargo-cache:/root/.cargo/registry -v $PWD:/volume --rm -t ghcr.io/polyverse/rust-dev-env cargo build --release 27 | 28 | - name: Create Release 29 | id: create_release 30 | uses: actions/create-release@v1 31 | env: 32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 33 | with: 34 | tag_name: ${{ github.ref }} 35 | release_name: Release ${{ github.ref }} 36 | draft: false 37 | prerelease: false 38 | 39 | - name: Upload Zerotect Executable 40 | id: upload-zerotect-executable 41 | uses: actions/upload-release-asset@v1 42 | env: 43 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 44 | with: 45 | upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps 46 | asset_path: ./target/x86_64-unknown-linux-musl/release/zerotect 47 | asset_name: zerotect 48 | asset_content_type: application/x-executable 49 | 50 | - name: Upload Zerotect Installer 51 | id: upload-zerotect-installer 52 | uses: actions/upload-release-asset@v1 53 | env: 54 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 55 | with: 56 | upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps 57 | asset_path: ./install/install.sh 58 | asset_name: install.sh 59 | asset_content_type: application/x-shellscript 60 | 61 | - name: Upload Zerotect Reference config file 62 | id: upload-zerotect-reference-config-file 63 | uses: actions/upload-release-asset@v1 64 | env: 65 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 66 | with: 67 | upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps 68 | asset_path: ./reference/zerotect.toml 69 | asset_name: zerotect.toml 70 | asset_content_type: application/toml 71 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | 13 | #Added by cargo 14 | # 15 | #already existing elements are commented out 16 | 17 | /target 18 | #**/*.rs.bk 19 | 20 | # Any a.outs 21 | a.out 22 | 23 | 24 | # MacOS 25 | .DS_Store 26 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | # Contributor Covenant Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | We as members, contributors, and leaders pledge to make participation in our 7 | community a harassment-free experience for everyone, regardless of age, body 8 | size, visible or invisible disability, ethnicity, sex characteristics, gender 9 | identity and expression, level of experience, education, socio-economic status, 10 | nationality, personal appearance, race, religion, or sexual identity 11 | and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, 14 | diverse, inclusive, and healthy community. 
15 | 16 | ## Our Standards 17 | 18 | Examples of behavior that contributes to a positive environment for our 19 | community include: 20 | 21 | * Demonstrating empathy and kindness toward other people 22 | * Being respectful of differing opinions, viewpoints, and experiences 23 | * Giving and gracefully accepting constructive feedback 24 | * Accepting responsibility and apologizing to those affected by our mistakes, 25 | and learning from the experience 26 | * Focusing on what is best not just for us as individuals, but for the 27 | overall community 28 | 29 | Examples of unacceptable behavior include: 30 | 31 | * The use of sexualized language or imagery, and sexual attention or 32 | advances of any kind 33 | * Trolling, insulting or derogatory comments, and personal or political attacks 34 | * Public or private harassment 35 | * Publishing others' private information, such as a physical or email 36 | address, without their explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a 38 | professional setting 39 | 40 | ## Enforcement Responsibilities 41 | 42 | Community leaders are responsible for clarifying and enforcing our standards of 43 | acceptable behavior and will take appropriate and fair corrective action in 44 | response to any behavior that they deem inappropriate, threatening, offensive, 45 | or harmful. 46 | 47 | Community leaders have the right and responsibility to remove, edit, or reject 48 | comments, commits, code, wiki edits, issues, and other contributions that are 49 | not aligned to this Code of Conduct, and will communicate reasons for moderation 50 | decisions when appropriate. 51 | 52 | ## Scope 53 | 54 | This Code of Conduct applies within all community spaces, and also applies when 55 | an individual is officially representing the community in public spaces. 
56 | Examples of representing our community include using an official e-mail address, 57 | posting via an official social media account, or acting as an appointed 58 | representative at an online or offline event. 59 | 60 | ## Enforcement 61 | 62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 63 | reported to the community leaders responsible for enforcement at 64 | support@polyverse.com. 65 | All complaints will be reviewed and investigated promptly and fairly. 66 | 67 | All community leaders are obligated to respect the privacy and security of the 68 | reporter of any incident. 69 | 70 | ## Enforcement Guidelines 71 | 72 | Community leaders will follow these Community Impact Guidelines in determining 73 | the consequences for any action they deem in violation of this Code of Conduct: 74 | 75 | ### 1. Correction 76 | 77 | **Community Impact**: Use of inappropriate language or other behavior deemed 78 | unprofessional or unwelcome in the community. 79 | 80 | **Consequence**: A private, written warning from community leaders, providing 81 | clarity around the nature of the violation and an explanation of why the 82 | behavior was inappropriate. A public apology may be requested. 83 | 84 | ### 2. Warning 85 | 86 | **Community Impact**: A violation through a single incident or series 87 | of actions. 88 | 89 | **Consequence**: A warning with consequences for continued behavior. No 90 | interaction with the people involved, including unsolicited interaction with 91 | those enforcing the Code of Conduct, for a specified period of time. This 92 | includes avoiding interactions in community spaces as well as external channels 93 | like social media. Violating these terms may lead to a temporary or 94 | permanent ban. 95 | 96 | ### 3. Temporary Ban 97 | 98 | **Community Impact**: A serious violation of community standards, including 99 | sustained inappropriate behavior. 
100 | 101 | **Consequence**: A temporary ban from any sort of interaction or public 102 | communication with the community for a specified period of time. No public or 103 | private interaction with the people involved, including unsolicited interaction 104 | with those enforcing the Code of Conduct, is allowed during this period. 105 | Violating these terms may lead to a permanent ban. 106 | 107 | ### 4. Permanent Ban 108 | 109 | **Community Impact**: Demonstrating a pattern of violation of community 110 | standards, including sustained inappropriate behavior, harassment of an 111 | individual, or aggression toward or disparagement of classes of individuals. 112 | 113 | **Consequence**: A permanent ban from any sort of public interaction within 114 | the community. 115 | 116 | ## Attribution 117 | 118 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 119 | version 2.0, available at 120 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 121 | 122 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 123 | enforcement ladder](https://github.com/mozilla/diversity). 124 | 125 | [homepage]: https://www.contributor-covenant.org 126 | 127 | For answers to common questions about this code of conduct, see the FAQ at 128 | https://www.contributor-covenant.org/faq. Translations are available at 129 | https://www.contributor-covenant.org/translations. 130 | 131 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Please note we have a [Code of Conduct](CODE_OF_CONDUCT.md), please follow it in all your interactions with the project. 4 | 5 | ## Contributing Process 6 | 1. Read about what kind of contributions we look for and will accept in the section below. 7 | 2. 
Before doing any work, file an issue to explain the problem you're trying to solve and how you intend to solve it. 8 | It can save a lot of effort and frustration to discuss what you intend to do and whether it will be merged. 9 | 3. Submit a PR with changes. If a change is very large or disruptive, we prefer keeping the overarching issue open, 10 | and submitting smaller focussed PRs that build up to the feature. This helps each PR build incrementally on the others 11 | and keeps cognitive load low. 12 | 13 | ## What we accept 14 | 15 | 1. We accept pretty much ALL contributions so long as: 16 | (a) They don't break an existing use-case or dependent 17 | (b) They don't do something that is wildly out of scope of the project. 18 | 2. We don't have extensive coding guidelines and pretty much everything goes so long as: 19 | (a) It is safe Rust and reasonable. 20 | (b) There are extensive UTs to define the feature. 21 | (c) You may use `cargo fmt` for formatting. It's not perfect and it can be downright ugly. But it's consistent. 22 | 3. For Zerotect, Unit Tests are documentation where truth is captured. What kinds of events does it parse? What formats 23 | does it emit in? What config flags does it support? All answers are found in UTs. 24 | 25 | ## Pull Request Process 26 | 27 | 1. Ensure any install or build dependencies are removed before the end of the layer when doing a 28 | build. 29 | 2. Update the README.md with details of changes to the interface, this includes new environment 30 | variables, exposed ports, useful file locations and container parameters. 31 | 3. Once approved, the reviewer will merge your Pull Request. 32 | 33 | ## Reporting a Bug 34 | 35 | 1. Install the latest version of Zerotect, and try to reproduce the bug. You can find installation and uninstallation 36 | instructions [here](install/README.md). 37 | 2. Look at our [issue tracker](https://github.com/polyverse/zerotect/issues) to make sure someone else hasn't reported the same bug. 
38 | 3. If nobody else has reported the issue, create a bug report! Our issue tracker has a bug report template, please fill out as many of the 39 | sections as you can. 40 | 41 | ## Feature Request 42 | 43 | 1. Install the latest version of Zerotect. Maybe your feature slipped into the latest version? 44 | 2. Look at our [issue tracker](https://github.com/polyverse/zerotect/issues) to see if anyone else has requested the same 45 | feature, or something similar. 46 | 3. If nobody has a similar feature request, write one up in the [issue tracker](https://github.com/polyverse/zerotect/issues)! 47 | Our issue tracker has a feature request template, please fill out as many of the 48 | sections as you can. 49 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "zerotect" 3 | version = "0.4.24" 4 | authors = ["Archis Gore "] 5 | edition = "2018" 6 | license = "Apache-2.0" 7 | 8 | [dependencies] 9 | log = "0.4.14" 10 | sys-info = "0.9.1" 11 | sysctl = "0.4.3" 12 | strum = "0.23.0" 13 | strum_macros = "0.23.1" 14 | regex = "1.5.4" 15 | enum-display-derive = "0.1.1" 16 | num = "0.4.0" 17 | lazy_static = "1.4.0" 18 | num-traits = "0.2.14" 19 | num-derive = "0.3.3" 20 | timeout-iterator = { version = "1.1.7" } 21 | serde = { version = "1.0.132", features = ["derive", "rc"] } 22 | serde_json = { version = "1.0.73", features = ["arbitrary_precision"] } 23 | toml = "0.5.8" 24 | http = "0.2.5" 25 | rmesg = {version = "1.0.18", features = ["extra-traits"] } 26 | libflate = "1.1.1" 27 | rust-cef-derive = "0.2.6" 28 | rust-cef = "0.2.6" 29 | syslog = "6.0.1" 30 | libc = "0.2.112" 31 | file-rotation = { version = "0.4.2" } 32 | futures = "0.3.19" 33 | pagerduty-rs = {version = "0.1.4" } 34 | time = {version = "0.3.5", features = ["std", "serde", "formatting", "parsing"] } 35 | reqwest = { version = "0.11.8"} 36 | tokio = { version = "1.15.0", 
features = ["rt", "macros", "time", "sync"] } 37 | tokio-stream = "0.1.8" 38 | pin-project = { version = "1.0.8" } 39 | 40 | [dependencies.clap] 41 | version = "2.34.0" 42 | # Disable all features - just minimal brutal args parsing 43 | default-features = false 44 | 45 | [dev-dependencies] 46 | assert_matches = "1.5.0" 47 | rand = "0.8.2" 48 | pretty_assertions = "1.0.0" 49 | 50 | [profile.dev] 51 | # We don't need stack unwinding in dev either - can be manually enabled 52 | panic = 'abort' 53 | 54 | [profile.release] 55 | # We don't need stack unwinding in releases 56 | panic = 'abort' 57 | # Enable LTO for release (since it only builds in Travis and doesn't block day to day) 58 | lto = "fat" 59 | # One code-gen unit so we get a highly optimized binary 60 | codegen-units = 1 61 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | COPY target/x86_64-unknown-linux-musl/release/zerotect / 3 | ENTRYPOINT [ "/zerotect" ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # zerotect 2 | 3 | [![Build Status](https://travis-ci.org/polyverse/zerotect.svg?branch=master)](https://travis-ci.org/polyverse/zerotect) 4 | 5 | [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](CODE_OF_CONDUCT.md) 6 | 7 | ## Table of Contents 8 | 9 | * [What is Zerotect](#what-is-zerotect) 10 | * [Install Zerotect](#install-zerotect) 11 | * [Usage](#usage) 12 | * [Recommended usage](#recommended-usage) 13 | * [Understanding the minimal configuration](#understanding-the-minimal-configuration) 14 | * [All CLI options](#all-cli-options) 15 | * [Partners/Integrations](#partnersintegrations) 16 | * [Micro Focus ArcSight](#micro-focus-arcsight) 17 | * [PagerDuty](#pagerduty) 18 | * [Zerotect Log](#zerotect-log) 19 | * [Contributing](#contributing) 20 | * [Zero Day Reward Program](#zero-day-reward-program) 21 | 22 | ## What is Zerotect 23 | 24 | Detecting malicious scans can be the first indicator 
of a potential attack. 25 | Watching for things like port scans is commonplace in security circles, but how 26 | do you detect a BROP attack, or any other kind of buffer-overflow attack for 27 | that matter? 28 | 29 | Zerotect is a small open source agent that monitors kernel logs to 30 | look for conclusive proof of memory-based exploits from the side-effects of those 31 | attacks. These appear in the form of process crashes (faults). Zerotect doesn't 32 | actively intercept network traffic, but instead, passively monitors kernel logs for 33 | anomalies. This means the attack surface of your servers isn't increased, and the stability 34 | of Zerotect doesn't affect the stability of anything else on the system. 35 | 36 | When anomalies are detected, Zerotect can report these anomalies to a variety of analytics 37 | tools. Our intent is to support a variety of tools, and integrations with those tools. Please 38 | file a Feature Request with examples of how you'd like to configure it and use it. 39 | 40 | ## Install Zerotect 41 | 42 | See [Installation](/install/README.md) for details on how to install/run Zerotect as a proper monitor in a production environment. 43 | 44 | To install quickly: 45 | 46 | ```.bash 47 | curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sh 48 | ``` 49 | 50 | ## Partners/Integrations 51 | 52 | Zerotect by itself provides limited actionable value. The best value is derived when Zerotect is one of many signals that a larger monitoring/observability strategy is processing. This could be a SOC, a SIEM, an alerting system or just a simple log aggregator. 53 | 54 | To that end Zerotect supports a number of outbound integrations (i.e. where it sends its data) listed below. 55 | 56 | ### Micro Focus ArcSight 57 | 58 | [Zerotect on ArcSight Marketplace](https://marketplace.microfocus.com/arcsight/content/zerotect) 59 | 60 | Zerotect sends events to ArcSight through the Syslog SmartConnector. 
It is easy to configure in a single command. For more details read the [Administration Guide](/integrations/ArcSight/MF_Polyverse_ZeroTect_0.4_ArcSight_CEF_Integration_Guide_2020.pdf). 61 | 62 | ### PagerDuty 63 | 64 | [Zerotect integration with PagerDuty](https://www.pagerduty.com/integrations/zerotect/) 65 | 66 | Zerotect can send detected events to the PagerDuty Events API V2 through a single configuration. View the [PagerDuty Integration Guide](/integrations/PagerDuty/README.md) for details. 67 | 68 | ## Zerotect Log 69 | 70 | Zerotect stores activities in the log file located in /var/log/zerotect.log. Examine this log file for further investigation of potential attacks. 71 | 72 | The authoritative log format is defined in [schema.json](/reference/schema.json). 73 | 74 | You may use it to generate parsers. The schema contains documentation comments, explanations of fields, and so forth. 75 | 76 | ## Contributing 77 | 78 | We believe that open-source and robust community contributions make everyone safer, 79 | therefore we accept pretty much ALL contributions so long as: (a) They don't break an 80 | existing use-case or dependency and (b) They don't do something that is wildly out of scope of the project. 81 | 82 | Please read our [Code of Conduct](CODE_OF_CONDUCT.md), and our [Contribution Guidelines](CONTRIBUTING.md) before starting work on a new feature or bug. 
83 | 84 | -------------------------------------------------------------------------------- /build-container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | IMAGE="ghcr.io/polyverse/rust-dev-env" 4 | 5 | docker run -v /Users/archisgore/.rust_carco_cache:/root/.cargo/registry -v $PWD:/volume --rm -it --privileged $IMAGE 6 | -------------------------------------------------------------------------------- /install/README.md: -------------------------------------------------------------------------------- 1 | # Installing Zerotect 2 | 3 | This describes how zerotect can be obtained (securely) and configured so you can build your own recipes. 4 | 5 | ## Table of Contents 6 | 7 | * ["Trust Me" Quickstarts](#trust-me-quickstarts) 8 | * [First-Principles Install](#first-principles-install) 9 | * [Obtain the zerotect binary](#obtain-the-zerotect-binary) 10 | * [Download](#download) 11 | * [Zerotect Container image](#zerotect-container-image) 12 | * [Compile from source](#compile-from-source) 13 | * [Place zerotect binary in a durable location](#place-zerotect-binary-in-a-durable-location) 14 | * [Zerotect lifecycle](#zerotect-lifecycle) 15 | * [Run one zerotect per kernel](#run-one-zerotect-per-kernel) 16 | * [Automate zerotect lifecycle with your init system](#automate-zerotect-lifecycle-with-your-init-system) 17 | * [One-off direct execution](#one-off-direct-execution) 18 | * [As a background process](#as-a-background-process) 19 | * [In the Cloud-Native World](#in-the-cloud-native-world) 20 | * [1. DaemonSets](#1-daemonsets) 21 | * [2. Sidecars](#2-sidecars) 22 | * [Configure with a TOML file](#configure-with-a-toml-file) 23 | 24 | ## "Trust Me" Quickstarts 25 | 26 | Everything described in this document is encapsulated in scripted recipes for various distributions and init-systems. These are a great way to quickly install zerotect. 
27 | 28 | As the scripts follow the curl-pipe-to-bash pattern, the rest of this document details how you can develop your own automation to deploy zerotect, depending on your level of trust (which may be zero trust).
29 | 30 | To install zerotect:
31 |
32 | ```.bash
33 | curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sh
34 | ```
35 |
36 | To uninstall zerotect:
37 |
38 | ```.bash
39 | curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sudo sh -s -- --uninstall
40 | ```
41 |
42 | To get all supported options:
43 |
44 | ```.bash
45 | curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sudo sh -s -- --help
46 | ```
47 |
48 | ## First-Principles Install
49 |
50 | This section deals with zerotect installation primitives (including, if necessary, compiling it from source yourself). This is especially important for security-conscious organizations that need a complete auditable trail.
51 |
52 | ### Obtain the zerotect binary
53 |
54 | #### Download
55 |
56 | Zerotect executables are posted in [Github Releases](https://github.com/polyverse/zerotect/releases).
57 |
58 | The latest zerotect executable can be found here: [https://github.com/polyverse/zerotect/releases/latest/download/zerotect](https://github.com/polyverse/zerotect/releases/latest/download/zerotect)
59 |
60 | For transparency, you can study [.travis.yml](../.travis.yml) and the [build logs](https://travis-ci.org/github/polyverse/zerotect) to audit the pre-built binaries.
61 |
62 | #### Zerotect Container image
63 |
64 | As part of the zerotect build process, container images are also built and published on GitHub, available here:
65 |
66 | [https://github.com/polyverse/zerotect/packages/199165](https://github.com/polyverse/zerotect/packages/199165)
67 |
68 | These are particularly useful for running as Sidecars (in Pods/Tasks) or DaemonSets (once-per-host).
69 | 70 | More information on this usage is found in the [In the Cloud-Native World](#in-the-cloud-native-world) section.
71 |
72 | #### Compile from source
73 |
74 | For complete audit and assurance, you may compile zerotect from scratch. Zerotect is built in [Rust](https://www.rust-lang.org/).
75 |
76 | On a system with [Rust build tools](https://www.rust-lang.org/tools/install) available:
77 |
78 | ```bash
79 | # clone this repository
80 | git clone https://github.com/polyverse/zerotect.git
81 |
82 | # Go to the repository root
83 | cd zerotect
84 |
85 | # Build
86 | cargo build
87 | ```
88 |
89 | All regular rust tools/options recipes work - from cross-compilation, static linking, build profiles and so forth. You may build it any way you wish.
90 |
91 | ### Place zerotect binary in a durable location
92 |
93 | `DURABLE_ZEROTECT_LOCATION=/usr/local/bin`
94 |
95 | We recommend placing zerotect in the `/usr/local/bin` directory. Specifically since zerotect needs to run with higher privilege levels than a regular user, it is better to not have it under a user directory.
96 |
97 | ### zerotect lifecycle
98 |
99 | To ensure zerotect is running when you want it to run, and not running when you don't, you need to plan for some sort of lifecycle management. We present two main recommendations for running zerotect.
100 |
101 | #### Run one zerotect per kernel
102 |
103 | Since zerotect detects side-effects from the kernel, it is sufficient to run a single instance of zerotect for every kernel. What this means is, traditional Linux "containers" (using cgroups and namespaces) do not need zerotect within them so long as either the host is running it, or there's a single container running it.
104 |
105 | However, "VM" containers such as Kata Containers, Firecracker VMs, and so forth will warrant a zerotect instance per container, since they would not share the same kernel.
106 | 107 | #### Automate zerotect lifecycle with your init system 108 | 109 | Zerotect needs to run once-per-kernel. Usually a kernel bootstraps and powers a rather complex system, and the system runs applications (and/or containers) on top of it. 110 | 111 | In such cases, zerotect should be installed as a manageable service directly on the system. 112 | 113 | Example 1: Some applications running on a host 114 | 115 | ```.text 116 | application application application zerotect process directly 117 | process 1 process 2 process 3 on kernel host/VM (not 118 | containerized) 119 | +--------------------------------------------------------------------------+ 120 | | | 121 | | Linux Kernel | 122 | | | 123 | +--------------------------------------------------------------------------+ 124 | ``` 125 | 126 | Example 2: Some containers running on a host 127 | 128 | ```.text 129 | +------------+ +------------+ +------------+ 130 | | | | | | | zerotect process directly 131 | | container1 | | container2 | | container3 | on kernel host/VM (not 132 | | | | | | | containerized) 133 | +------------+ +------------+ +------------+ 134 | +--------------------------------------------------------------------------+ 135 | | | 136 | | Linux Kernel | 137 | | | 138 | +--------------------------------------------------------------------------+ 139 | ``` 140 | 141 | Example 3: Some applications/containers coexisting on a host 142 | 143 | ```.text 144 | +---------------+ 145 | | | 146 | application | container 5 | application zerotect process directly 147 | process 1 | | process 3 on kernel host/VM (not 148 | +---------------+ containerized) 149 | +--------------------------------------------------------------------------+ 150 | | | 151 | | Linux Kernel | 152 | | | 153 | +--------------------------------------------------------------------------+ 154 | ``` 155 | 156 | In all these cases, it helps to run zerotect using the init system ([systemd](https://systemd.io/), 
[sysvinit](https://en.wikipedia.org/wiki/Init#SYSV), [upstart](http://upstart.ubuntu.com/), etc.) 157 | 158 | Now it is possible (and may even be desirable in some cases, such as running a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on a [Kubernetes](https://kubernetes.io/) cluster) to run zerotect as a privileged container like the model below. 159 | 160 | The container itself is now a first-class serivce per host that must be managed through preferred container-management tooling. 161 | 162 | Example 4: zerotect as a privileged container 163 | 164 | ```.text 165 | +-----------------------------------------+ 166 | | | 167 | | zerotect in privileged container | 168 | | OR sufficient access to read | 169 | | /dev/kmsg | 170 | | | | 171 | +-----+-----------------------------------+ 172 | | 173 | | 174 | +--------------------------------------------------------------------------+ 175 | | | | 176 | | | | 177 | | v Linux Kernel | 178 | | /dev/kmsg | 179 | | | 180 | +--------------------------------------------------------------------------+ 181 | ``` 182 | 183 | This leaves one question open: How is zerotect run within the container itself? 184 | 185 | #### One-off direct execution 186 | 187 | This method is the recommended way to run zerotect in a container at entrypoint, with its maximum life being that of the container. This can be very useful for testing and validation of config options and parameters, as well as controlled on-demand execution. 188 | 189 | ```.bash 190 | $DURABLE_ZEROTECT_LOCATION/zerotect 191 | ``` 192 | 193 | Zerotect's lifetime is that of your current context (shell, user session or host). It will not automatically start up when a host/container starts. 194 | 195 | ##### As a background process 196 | 197 | You may push the one-off directly executed process to the background. A concrete example of this use is in [online demos](https://polyverse.com/learn), where zerotect doesn't need to be durable long-term. 
198 | 199 | It also has application in a container where you can spawn the zerotect process before the main blocking process is started. Like thus: 200 | 201 | ```.bash 202 | $DURABLE_ZEROTECT_LOCATION/zerotect & 203 | ``` 204 | 205 | When iterating/testing, un-orchestrated Docker containers can be monitored quickly without extra scaffolding (such as Docker Desktop testing). 206 | 207 | #### In the Cloud-Native World 208 | 209 | If you're 100% Cloud-Native and your primitive is a Container, there are two primary ways to run zerotect as a container. 210 | 211 | ##### 1. DaemonSets 212 | 213 | Whenever you run containers orchestrated over "Nodes" (Machines that you see and know about, on top of which your containers run), as with [Kubernetes](https://kubernetes.io/), [Nomad](https://www.nomadproject.io/), [ECS](https://aws.amazon.com/ecs/), [CloudRun](https://cloud.google.com/run/), [OpenShift](https://www.openshift.com/) or even plain config management tools like [Ansible](https://www.ansible.com/)/[Chef](https://www.chef.io/)/[Puppet](https://puppet.com/) and use [OCI (Docker) Images](https://www.opencontainers.org/) as purely a packaging/deployment mechanism. 214 | 215 | Using the principle of [Run one zerotect per kernel](#run-one-zerotect-per-kernel), we recommend running the [zerotect container](#zerotect-container-image) as a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) or equivalent for your orchestrator. 216 | 217 | ##### 2. Sidecars 218 | 219 | The second is mostly a subset of the first use-case, but where containers are really VMs and do not share a kernel or you do not see the host ([Azure Container Instances](https://azure.microsoft.com/en-us/services/container-instances/), [AWS Fargate](https://aws.amazon.com/fargate/), etc.) 220 | 221 | There are a number of isolation projects that make VMs look and feel like containers. 
These include (but are not limited to) [KataContainers](https://katacontainers.io/) and [Firecracker](https://firecracker-microvm.github.io/). 222 | 223 | When multiple containers in a "[Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/)" or "[Task](https://docs.aws.amazon.com/eks/latest/userguide/fargate-pod-configuration.html)", share the same kernel, it is useful to run zerotect as a sidecar within that Pod/Task. 224 | 225 | ### Configure with a TOML file 226 | 227 | While zerotect does take command-line parameters (documented in the main [README.md](../README.md)), it is not recommended to embed CLI-based configuration options in your init configuration. 228 | 229 | Instead, we recommend running it with a configuration file located at `/etc/zerotect/`: 230 | 231 | ```bash 232 | $DURABLE_ZEROTECT_LOCATION/zerotect --configfile /etc/zerotect/zerotect.toml 233 | ``` 234 | 235 | When using a configuration file, no other command-line options are supported. To see all options available in a configuration file, read the [Reference zerotect.toml file](../reference/zerotect.toml). 
236 | -------------------------------------------------------------------------------- /install/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Copyright (c) 2020 Polyverse Corporation 3 | 4 | 5 | # ******************************************************************************************************************* 6 | # Init-system agonistic functions and variables 7 | # ******************************************************************************************************************* 8 | 9 | default_log_file="/var/log/zerotect.toml" 10 | 11 | zerotect_binary="zerotect" 12 | zerotect_remote_location="https://github.com/polyverse/zerotect/releases/latest/download" 13 | zerotect_local_location="/usr/local/bin" 14 | 15 | tomldir="/etc/zerotect" 16 | tomlfile="zerotect.toml" 17 | 18 | ensure_root() { 19 | 20 | # Ensuring we are root 21 | if [ "$EUID" = "0" ]; then 22 | # try the EUID method 23 | return 0 24 | elif [ "$EUID" = "" ] && [ "$(id -u)" = "0" ]; then 25 | # Sometimes EUID is not 0 (and not set), then use the older method 26 | return 0 27 | fi 28 | 29 | return 1 30 | } 31 | 32 | download_latest_zerotect() { 33 | 34 | #make sure local location exists 35 | if [ ! -d "$zerotect_local_location" ]; then 36 | printf " |--> $zerotect_local_location does not exist. Creating it...\n" 37 | mkdir -p -m 755 $zerotect_local_location 38 | fi 39 | 40 | printf " |--> Downloading the latest $zerotect_binary from $zerotect_remote_location, and saving into $zerotect_local_location\n" 41 | type wget 2>/dev/null 1>/dev/null 42 | if [ "$?" = "0" ]; then 43 | printf " |--> Using wget to download zerotect...\n" 44 | wget -q -O "$zerotect_local_location/$zerotect_binary" "$zerotect_remote_location/$zerotect_binary" 45 | else 46 | type curl 2>/dev/null 1>/dev/null 47 | if [ "$?" 
= "0" ]; then 48 | printf " |--> Using curl to download zerotect...\n" 49 | curl -s -L -o "$zerotect_local_location/$zerotect_binary" "$zerotect_remote_location/$zerotect_binary" 50 | else 51 | printf " |--> Neither curl nor wget found on the system. Unable to download zerotect binary.\n" 52 | exit 1 53 | fi 54 | fi 55 | 56 | printf " |--> Making zerotect executable...\n" 57 | chmod 755 "$zerotect_local_location/$zerotect_binary" 58 | } 59 | 60 | create_zerotect_conf() { 61 | 62 | POLYCORDER_AUTH_KEY="$1" 63 | POLYCORDER_NODE_ID="$2" 64 | LOG_FILE_PATH="$3" 65 | SYSLOG_DEFAULT="$4" 66 | 67 | printf " |--> Creating zerotect configuration\n" 68 | if [ ! -d "$tomldir" ]; then 69 | mkdir -p -m 755 "$tomldir" 70 | fi 71 | 72 | tomlcontents=$(printf "\n") 73 | 74 | if [ "$POLYCORDER_AUTH_KEY" = "" ] && [ "$LOG_FILE_PATH" = "" ] && [ "$SYSLOG_DEFAULT" = "" ] && [ "$PAGERDUTY_INTEGRATION_KEY" = "" ]; then 75 | LOG_FILE_PATH="$default_log_file" 76 | printf " |--> NOTE: No parameters provided, so defaulting to a log file at: $LOG_FILE_PATH\n" 77 | fi 78 | 79 | if [ "$PAGERDUTY_INTEGRATION_KEY" != "" ]; then 80 | printf " |--> Sending detected events to PagerDuty\n" 81 | tomlcontents=$(printf "${tomlcontents}\npagerduty_routing_key = '$PAGERDUTY_INTEGRATION_KEY'") 82 | tomlcontents=$(printf "${tomlcontents}\n ") 83 | fi 84 | 85 | tomlcontents=$(printf "${tomlcontents}\n[auto_configure]") 86 | tomlcontents=$(printf "${tomlcontents}\nexception_trace = true") 87 | tomlcontents=$(printf "${tomlcontents}\nfatal_signals = true") 88 | tomlcontents=$(printf "${tomlcontents}\nklog_include_timestamp = true") 89 | tomlcontents=$(printf "${tomlcontents}\n ") 90 | tomlcontents=$(printf "${tomlcontents}\n[monitor]") 91 | tomlcontents=$(printf "${tomlcontents}\ngobble_old_events = false") 92 | tomlcontents=$(printf "${tomlcontents}\n ") 93 | 94 | if [ "$POLYCORDER_AUTH_KEY" != "" ]; then 95 | printf " |--> Sending events to polycorder with authkey: $POLYCORDER_AUTH_KEY\n" 96 | 
tomlcontents=$(printf "${tomlcontents}\n[polycorder]")
97 | tomlcontents=$(printf "${tomlcontents}\nauth_key = '$POLYCORDER_AUTH_KEY'")
98 | if [ "$POLYCORDER_NODE_ID" != "" ]; then
99 | printf " |--> Assigning polycorder events to nodeid: $POLYCORDER_NODE_ID\n"
100 | tomlcontents=$(printf "${tomlcontents}\nnode_id = '$POLYCORDER_NODE_ID'")
101 | fi
102 | fi
103 |
104 | if [ "$SYSLOG_DEFAULT" != "" ]; then
105 | printf " |--> Sending events to syslog (in JSON format)\n"
106 | tomlcontents=$(printf "${tomlcontents}\n[syslog]")
107 | tomlcontents=$(printf "${tomlcontents}\ndestination = 'default'")
108 | tomlcontents=$(printf "${tomlcontents}\nformat = 'JSON'")
109 | fi
110 |
111 | if [ "$LOG_FILE_PATH" != "" ]; then
112 | printf " |--> Sending events to log file (in JSON format)\n"
113 | tomlcontents=$(printf "${tomlcontents}\n[logfile]")
114 | # BUGFIX: honor the caller-supplied path (-l/--log-file). This was hard-coded to
114 | # '/var/log/zerotect.log', silently ignoring the user's chosen log file.
114 | tomlcontents=$(printf "${tomlcontents}\nfilepath = '$LOG_FILE_PATH'")
115 | tomlcontents=$(printf "${tomlcontents}\nformat = 'JSON'")
116 | fi
117 |
118 | tomlcontents=$(printf "${tomlcontents}\n ")
119 |
120 | #printf "Final configuration file contents are:\n$tomlcontents\n"
121 | printf "$tomlcontents" > $tomldir/$tomlfile
122 | chmod 644 $tomldir/$tomlfile
123 | printf " |--> Written configuration file to $tomldir/$tomlfile\n"
124 | }
125 |
126 | ensure_zerotect_running() {
127 | expected="$1"
128 | init_status="$2"
129 | sleep_seconds=$3
130 |
131 | if [ ! -z "$sleep_seconds" ]; then
132 | printf " |--> waiting upto $sleep_seconds seconds"
133 | pid=$(pgrep zerotect)
134 | while ([ $sleep_seconds -gt 0 ]) && (([ -z "$pid" ] && [ "$expected" = "yes" ]) || ([ ! -z "$pid" ] && [ "$expected" = "no" ])); do
135 | printf "."
136 | sleep 1 137 | sleep_seconds="$(($sleep_seconds-1))" 138 | pid=$(pgrep zerotect) 139 | done 140 | printf "\n" 141 | fi 142 | 143 | 144 | pid=$(pgrep zerotect) 145 | if [ -z "$pid" ]; then 146 | printf " |--> zerotect is not running in the background.\n" 147 | else 148 | printf " |--> zerotect is running in the background.\n" 149 | fi 150 | 151 | if ([ -z "$pid" ] && [ "$expected" = "yes" ]) || ([ ! -z "$pid" ] && [ "$expected" = "no" ]); then 152 | printf "That was unexpected. Service status:\n" 153 | $init_status 154 | exit 1 155 | fi 156 | } 157 | 158 | # ******************************************************************************************************************* 159 | # Upstart-specific functions and variables 160 | # ******************************************************************************************************************* 161 | 162 | upstart_job_dir="/etc/init" 163 | upstart_job_file="zerotect.conf" 164 | 165 | is_upstart() { 166 | printf " |--> looking for upstart... " 167 | initver=$(/sbin/init --version 2>&1) 168 | case "$initver" in 169 | *upstart*) 170 | printf "found.\n" 171 | ;; 172 | *) 173 | printf "not found.\n" 174 | return 1 175 | ;; 176 | esac 177 | 178 | printf " |--> Ensuring ($upstart_job_dir) exists..." 
179 | if [ -d "$upstart_job_dir" ]; then 180 | printf "yes.\n" 181 | else 182 | printf "no.\n" 183 | printf " The directory $upstart_job_dir is required to configure the zerotect service.\n" 184 | printf " This script does not support any non-standard configurations and behaviors of upstart.\n" 185 | return 2 186 | fi 187 | 188 | return 0 189 | } 190 | 191 | 192 | upstart_create_job_file() { 193 | ## Trailing newlines are removed: https://unix.stackexchange.com/questions/446992/when-printing-a-variable-that-contains-newlines-why-is-the-last-newline-strippe 194 | upstart_job=$(printf "${upstart_job}\ndescription \"The polyverse monitoring agent for monitoring zero-day attack attempts\"") 195 | upstart_job=$(printf "${upstart_job}\n ") 196 | upstart_job=$(printf "${upstart_job}\nrespawn") 197 | upstart_job=$(printf "${upstart_job}\nrespawn limit unlimited") 198 | upstart_job=$(printf "${upstart_job}\n ") 199 | upstart_job=$(printf "${upstart_job}\nstart on runlevel [2345]") 200 | upstart_job=$(printf "${upstart_job}\nstop on runlevel [016]") 201 | upstart_job=$(printf "${upstart_job}\n ") 202 | upstart_job=$(printf "${upstart_job}\nexec $zerotect_local_location/$zerotect_binary --configfile $tomldir/$tomlfile") 203 | upstart_job=$(printf "${upstart_job}\n ") 204 | 205 | printf " |--> Writing $upstart_job_dir/$upstart_job_file file.\n" 206 | printf "$upstart_job" > $upstart_job_dir/$upstart_job_file 207 | 208 | printf " |--> Ensuring zerotect starts at system start\n" 209 | initctl reload-configuration 210 | 211 | printf " |--> Starting zerotect now\n" 212 | initctl start zerotect 213 | printf " |--> Restarting zerotect (if it was already started and this is a reinstall)\n" 214 | initctl restart zerotect 215 | } 216 | 217 | upstart_status() { 218 | initctl status zerotect 219 | } 220 | 221 | upstart_uninstall() { 222 | if [ -f "$upstart_job_dir/$upstart_job_file" ]; then 223 | printf " |--> Found zerotect job file: $upstart_job_dir/$upstart_job_file. 
Removing it (after stopping service).\n" 224 | initctl stop zerotect 225 | rm $upstart_job_dir/$upstart_job_file 226 | initctl reload-configuration 227 | fi 228 | 229 | if [ -f "$zerotect_local_location/$zerotect_binary" ]; then 230 | printf " |--> Found zerotect binary: $zerotect_local_location/$zerotect_binary. Removing it.\n" 231 | rm $zerotect_local_location/$zerotect_binary 232 | fi 233 | 234 | if [ -f "$tomldir/$tomlfile" ]; then 235 | printf " |--> Found toml configuration file: $tomldir/$tomlfile. Removing it.\n" 236 | rm $tomldir/$tomlfile 237 | printf " |--> Removing directory $tomldir\n" 238 | rmdir $tomldir 239 | fi 240 | } 241 | 242 | # ******************************************************************************************************************* 243 | # Systemd-specific functions and variables 244 | # ******************************************************************************************************************* 245 | 246 | systemd_unit_dir="/etc/systemd/system" 247 | systemd_unit_file="zerotect.service" 248 | 249 | is_systemd() { 250 | printf " |--> looking for systemd... " 251 | proc1=$(cat /proc/1/comm) 252 | if [ "$proc1" = "systemd" ]; then 253 | printf "found.\n" 254 | else 255 | printf "not found.\n" 256 | return 1 257 | fi 258 | 259 | printf " |--> Ensuring ($systemd_unit_dir) exists..." 
260 | if [ -d "$systemd_unit_dir" ]; then 261 | printf "yes.\n" 262 | else 263 | printf "no.\n" 264 | printf " The directory $systemd_unit_dir is required to configure the zerotect service.\n" 265 | printf " This script does not support any non-standard configurations and behaviors of systemd.\n" 266 | return 2 267 | fi 268 | return 0 269 | } 270 | 271 | 272 | systemd_create_unit_file() { 273 | ## Trailing newlines are removed: https://unix.stackexchange.com/questions/446992/when-printing-a-variable-that-contains-newlines-why-is-the-last-newline-strippe 274 | systemd_unit=$(printf "[Unit]") 275 | systemd_unit=$(printf "${systemd_unit}\nDescription=The polyverse monitoring agent for monitoring zero-day attack attempts") 276 | systemd_unit=$(printf "${systemd_unit}\nRequires=network-online.target") 277 | systemd_unit=$(printf "${systemd_unit}\nAfter=network-online.target") 278 | systemd_unit=$(printf "${systemd_unit}\n ") 279 | systemd_unit=$(printf "${systemd_unit}\n[Service]") 280 | systemd_unit=$(printf "${systemd_unit}\nExecStart=$zerotect_local_location/$zerotect_binary --configfile $tomldir/$tomlfile") 281 | systemd_unit=$(printf "${systemd_unit}\n ") 282 | systemd_unit=$(printf "${systemd_unit}\n[Install]") 283 | systemd_unit=$(printf "${systemd_unit}\nWantedBy=multi-user.target") 284 | systemd_unit=$(printf "${systemd_unit}\nWantedBy=graphical.target") 285 | systemd_unit=$(printf "${systemd_unit}\n ") 286 | 287 | printf " |--> Writing $systemd_unit_dir/$systemd_unit_file file.\n" 288 | printf "$systemd_unit" > $systemd_unit_dir/$systemd_unit_file 289 | 290 | printf " |--> Ensuring zerotect starts at system start\n" 291 | systemctl enable zerotect 292 | 293 | printf " |--> Starting zerotect now\n" 294 | systemctl start zerotect 295 | printf " |--> Restarting zerotect (if it was already started and this is a reinstall)\n" 296 | systemctl restart zerotect 297 | } 298 | 299 | systemd_status() { 300 | systemctl status zerotect 301 | } 302 | 303 | 
systemd_uninstall() { 304 | if [ -f "$systemd_unit_dir/$systemd_unit_file" ]; then 305 | printf " |--> Found zerotect service unit: $systemd_unit_dir/$systemd_unit_file. Removing it (after stopping service).\n" 306 | systemctl stop zerotect 307 | systemctl disable zerotect 308 | rm $systemd_unit_dir/$systemd_unit_file 309 | fi 310 | 311 | if [ -f "$zerotect_local_location/$zerotect_binary" ]; then 312 | printf " |--> Found zerotect binary: $zerotect_local_location/$zerotect_binary. Removing it.\n" 313 | rm $zerotect_local_location/$zerotect_binary 314 | fi 315 | 316 | if [ -f "$tomldir/$tomlfile" ]; then 317 | printf " |--> Found toml configuration file: $tomldir/$tomlfile. Removing it.\n" 318 | rm $tomldir/$tomlfile 319 | printf " |--> Removing directory $tomldir\n" 320 | rmdir $tomldir 321 | fi 322 | } 323 | 324 | # ******************************************************************************************************************* 325 | # OpenRC-specific functions and variables 326 | # ******************************************************************************************************************* 327 | 328 | openrc_init_dir="/etc/init.d" 329 | openrc_init_file="zerotect" 330 | 331 | is_openrc() { 332 | printf " |--> looking for OpenRC... " 333 | proc1=$(cat /proc/1/comm) 334 | if [ -r /run/openrc/softlevel ]; then 335 | printf "found.\n" 336 | else 337 | printf "not found.\n" 338 | return 1 339 | fi 340 | 341 | printf " |--> Ensuring ($openrc_init_dir) exists..." 
342 | if [ -d "$openrc_init_dir" ]; then 343 | printf "yes.\n" 344 | else 345 | printf "no.\n" 346 | printf " The directory $openrc_init_dir is required to configure the zerotect service.\n" 347 | printf " This script does not support any non-standard configurations and behaviors of OpenRC.\n" 348 | return 2 349 | fi 350 | return 0 351 | } 352 | 353 | openrc_create_init_file() { 354 | ## See: https://github.com/OpenRC/openrc/blob/master/service-script-guide.md 355 | ## Trailing newlines are removed: https://unix.stackexchange.com/questions/446992/when-printing-a-variable-that-contains-newlines-why-is-the-last-newline-strippe 356 | openrc_init=$(printf "#!/sbin/openrc-run") 357 | openrc_init=$(printf "${openrc_init}\n ") 358 | openrc_init=$(printf "${openrc_init}\ncommand=\"$zerotect_local_location/$zerotect_binary\"") 359 | openrc_init=$(printf "${openrc_init}\ncommand_args=\"--configfile=$tomldir/$tomlfile\"") 360 | # But what if the daemon isn't so well behaved? What if it doesn't know how to background 361 | # itself or create a pidfile? 
If it can do neither, then use, 362 | openrc_init=$(printf "${openrc_init}\ncommand_background=true") 363 | openrc_init=$(printf "${openrc_init}\npidfile=\"/run/$zerotect_binary.pid\"") 364 | openrc_init=$(printf "${openrc_init}\n ") 365 | # Depend on network being up 366 | openrc_init=$(printf "${openrc_init}\n ") 367 | openrc_init=$(printf "${openrc_init}\ndepend() {") 368 | openrc_init=$(printf "${openrc_init}\n need net") 369 | openrc_init=$(printf "${openrc_init}\n}") 370 | openrc_init=$(printf "${openrc_init}\n ") 371 | 372 | printf " |--> Writing $openrc_init_dir/$openrc_init_file file.\n" 373 | printf "$openrc_init" > $openrc_init_dir/$openrc_init_file 374 | 375 | printf " |--> Making zerotect init file executable (required for OpenRC)\n" 376 | chmod a+x $openrc_init_dir/$openrc_init_file 377 | 378 | printf " |--> Ensuring zerotect starts at system start\n" 379 | # Add to 'default' runlevel 380 | rc-update add zerotect default 381 | 382 | printf " |--> Starting zerotect now\n" 383 | rc-service zerotect start 384 | printf " |--> Restarting zerotect (if it was already started and this is a reinstall)\n" 385 | rc-service zerotect restart 386 | } 387 | 388 | openrc_status() { 389 | rc-service zerotect status 390 | } 391 | 392 | openrc_uninstall() { 393 | if [ -f "$openrc_init_dir/$openrc_init_file" ]; then 394 | printf " |--> Found zerotect init file: $openrc_init_dir/$openrc_init_file. Removing it (after stopping service).\n" 395 | rc-service zerotect stop 396 | rc-update del zerotect default 397 | rm $openrc_init_dir/$openrc_init_file 398 | fi 399 | 400 | if [ -f "$zerotect_local_location/$zerotect_binary" ]; then 401 | printf " |--> Found zerotect binary: $zerotect_local_location/$zerotect_binary. Removing it.\n" 402 | rm $zerotect_local_location/$zerotect_binary 403 | fi 404 | 405 | if [ -f "$tomldir/$tomlfile" ]; then 406 | printf " |--> Found toml configuration file: $tomldir/$tomlfile. 
Removing it.\n" 407 | rm $tomldir/$tomlfile 408 | printf " |--> Removing directory $tomldir\n" 409 | rmdir $tomldir 410 | fi 411 | } 412 | 413 | 414 | 415 | # ******************************************************************************************************************* 416 | # Usage 417 | # ******************************************************************************************************************* 418 | 419 | print_usage() { 420 | printf "\n" 421 | printf "Usage:\n" 422 | printf " $0 [arguments]\n" 423 | printf "\n" 424 | printf " -p|--polycorder : The polycorder auth key allows zerotect to send detected events to Polycorder,\n" 425 | printf " the hosted analytics platform available in the Polyverse Account dashboard.\n" 426 | printf " -n|--polycorder-node-id : An optional node identifier/discriminator which would allow analytics to\n" 427 | printf " differentiate this particular node's events. (requires polycorder auth key.)\n" 428 | printf " -l|--log-file : Writes zerotect logs to file provided at path.\n" 429 | printf " -s|--syslog : Sends zerotect logs to syslog at standard Unix sockets, i.e. /dev/log and\n" 430 | printf " /var/run/syslog in that order, TCP port (601) or UDP port (514).\n" 431 | printf " --pagerduty : The PagerDuty integration key allows zerotect to send detected events to PagerDuty.\n" 432 | printf " --uninstall : Removes zerotect from this system.\n" 433 | printf "\n NOTE: If no arguments are provided, '--log-file /var/log/zerotect.log' is assumed.\n" 434 | } 435 | 436 | # ******************************************************************************************************************* 437 | # Not functions... this is main execution thread... 438 | # ******************************************************************************************************************* 439 | 440 | printf "\n" 441 | 442 | # parse arguments.... 
443 | # copied from: https://medium.com/@Drew_Stokes/bash-argument-parsing-54f3b81a6a8f 444 | PARAMS="" 445 | while [ "$#" -gt 0 ]; do 446 | case "$1" in 447 | -p|--polycorder) 448 | POLYCORDER_AUTH_KEY=$2 449 | shift 2 450 | ;; 451 | -n|--polycorder-node-id) 452 | POLYCORDER_NODE_ID=$2 453 | shift 2 454 | ;; 455 | -l|--log-file) 456 | LOG_FILE_PATH=$2 457 | shift 2 458 | ;; 459 | -s|--syslog) 460 | SYSLOG_DEFAULT=true 461 | shift 462 | ;; 463 | --pagerduty) 464 | PAGERDUTY_INTEGRATION_KEY=$2 465 | shift 2 466 | ;; 467 | -h|--help) 468 | printf "\n" 469 | printf "Configures and installs Zerotect as a daemon on this host based on the init system running on it. This script does not\n" 470 | printf "provide all possible configuration options, and instead provides a smooth, opinionated set of defaults. To configure\n" 471 | printf "zerotect with a finer granularity, you may either modify the file located at: $tomldir/$tomlfile,\n" 472 | printf "or download and run the zerotect executable manually.\n" 473 | print_usage 474 | exit 1 475 | shift 476 | ;; 477 | --uninstall) 478 | UNINSTALL=true 479 | shift 480 | ;; 481 | -*|--*=) # unsupported flags 482 | printf "Error: Unsupported flag $1\n" >&2 483 | print_usage 484 | exit 1 485 | ;; 486 | *) # preserve positional arguments 487 | PARAMS="$PARAMS $1" 488 | shift 489 | ;; 490 | esac 491 | done # set positional arguments in their proper place 492 | eval set -- "$PARAMS" 493 | 494 | printf "\n" 495 | printf "Zerotect installer\n\n" 496 | printf "==> Step 1/6: Ensuring we're running as root..." 497 | # ensure we're running as root 498 | ensure_root 499 | if [ "$?" 
= "0" ]; then 500 | printf "yes.\n" 501 | else 502 | printf "no.\n" 503 | printf " This script must be run as root because it needs to reliably detect the init system,\n" 504 | printf " and be able to install the zerotect service using the appropriate install script.\n" 505 | exit 1 506 | fi 507 | 508 | printf "==> Step 2/6: Detecting Init System...\n" 509 | 510 | # Set the helper functions based on which init system we detect 511 | is_systemd 512 | if [ "$?" = "0" ]; then 513 | create_init_file="systemd_create_unit_file" 514 | init_status="systemd_status" 515 | uninstall="systemd_uninstall" 516 | else 517 | is_openrc 518 | if [ "$?" = "0" ]; then 519 | create_init_file="openrc_create_init_file" 520 | init_status="openrc_status" 521 | uninstall="openrc_uninstall" 522 | else 523 | is_upstart 524 | if [ "$?" = "0" ]; then 525 | create_init_file="upstart_create_job_file" 526 | init_status="upstart_status" 527 | uninstall="upstart_uninstall" 528 | else 529 | printf " |--> No more init systems supported. 
Zerotect does not have a recipe for your system.\n" 530 | exit 0 531 | fi 532 | fi 533 | fi 534 | 535 | if [ "$UNINSTALL" != "" ]; then 536 | printf "==> Step 4/6: No step for when uninstalling.\n" 537 | printf "==> Step 5/6: Uninstalling zerotect...\n" 538 | $uninstall 539 | printf "==> Step 6/6: Ensure zerotect is not running...\n" 540 | ensure_zerotect_running "no" $init_status 60 541 | else 542 | printf "==> Step 3/6: Downloading zerotect binary...\n" 543 | download_latest_zerotect 544 | 545 | printf "==> Step 4/6: Creating zerotect configuration file...\n" 546 | create_zerotect_conf "$POLYCORDER_AUTH_KEY" "$POLYCORDER_NODE_ID" "$LOG_FILE_PATH" "$SYSLOG_DEFAULT" 547 | 548 | printf "==> Step 5/6: Adding zerotect to init system...\n" 549 | $create_init_file 550 | 551 | printf "==> Step 6/6: Ensure zerotect is running...\n" 552 | ensure_zerotect_running "yes" $init_status 60 553 | fi 554 | -------------------------------------------------------------------------------- /integrations/ArcSight/MF_ArcSight_and_Polyverse_ZeroTect_SolutionBrief_v1.1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/ArcSight/MF_ArcSight_and_Polyverse_ZeroTect_SolutionBrief_v1.1.pdf -------------------------------------------------------------------------------- /integrations/ArcSight/MF_Polyverse_ZeroTect_0.4_ArcSight_CEF_Integration_Guide_2020.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/ArcSight/MF_Polyverse_ZeroTect_0.4_ArcSight_CEF_Integration_Guide_2020.pdf -------------------------------------------------------------------------------- /integrations/ArcSight/MicroFocus_ArcSight_CEF_FieldMappings.xlsx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/ArcSight/MicroFocus_ArcSight_CEF_FieldMappings.xlsx -------------------------------------------------------------------------------- /integrations/PagerDuty/README.md: -------------------------------------------------------------------------------- 1 | # PagerDuty Integration 2 | 3 | ## PagerDuty + Zerotect Integration Benefits 4 | 5 | * Detect memory-based attacks in progress 6 | 7 | ## How it Works 8 | 9 | Zerotect looks for faults (such as segmentation fault, protection fault, etc.) in the [kernel log buffer](https://www.kernel.org/doc/html/latest/core-api/printk-basics.html) and analyzes them in real-time to detect memory-based attacks in progress. When an attack is detected, an Event is raised in PagerDuty. 10 | 11 | ## Requirements 12 | 13 | * PagerDuty integrations require an Admin base role for account authorization. If you do not have this role, please reach out to an Admin or Account Owner within your organization to configure the integration. 14 | * Zerotect requires an integration key. Integration keys are generated by creating a new service or by creating a new integration for an existing service. 15 | 16 | ## Support 17 | 18 | If you need help with this integration, please contact support@polyverse.com. 19 | 20 | ## Integration Walkthrough 21 | 22 | ### In PagerDuty 23 | 24 | #### Integrating With a PagerDuty Service 25 | 26 | 1. From the **Configuration** menu, select **Services**. 27 | 2. There are two ways to add an integration to a service: 28 | * **If you are adding your integration to an existing service**: Click the **name** of the service you want to add the integration to. Then, select the **Integrations** tab and click the **New Integration** button. 
29 | * **If you are creating a new service for your integration**: Please read our documentation in section [Configuring Services and Integrations](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) and follow the steps outlined in the [Create a New Service](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-new-service) section, selecting **Zerotect** as the **Integration Type** in step 4. Continue with the In **Zerotect** section (below) once you have finished these steps. 30 | 3. Enter an **Integration Name** in the format `monitoring-tool-service-name` (e.g. **Zerotect**-Attack-Detections) and select **Zerotect** from the Integration Type menu. 31 | 4. Click the **Add Integration** button to save your new integration. You will be redirected to the Integrations tab for your service. 32 | 5. An **Integration Key** will be generated on this screen. Keep this key saved in a safe place, as it will be used when you configure the integration with **Zerotect** in the next section. 33 | ![PagerDuty integration key screenshot](pagerduty-integration-key.png) 34 | 35 | ### In **ZeroTect** 36 | 37 | 1. Install Zerotect on any host with the following command: 38 | 39 | ```.bash 40 | curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sudo sh -s -- --pagerduty 41 | ``` 42 | 43 | #### How to Uninstall 44 | 45 | 1. Uninstall Zerotect by running this command: 46 | 47 | ```.bash 48 | curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sudo sh -s -- --uninstall 49 | ``` 50 | 51 | ## Testing PagerDuty integration 52 | 53 | [Polyverse](https://polyverse.com) hosts an [online blind-rop attack demo](https://polyverse.com/learn/blind-rop/), which can be used to test incidents being raised in PagerDuty. 54 | 55 | Note that this demo intentionally raises a LOT of incidents (it uses half a dozen attack techniques in one demo.) 
In reality you won't see more than one or two incidents as most zero-day attacks don't progress that rapidly and don't use all the techniques. 56 | 57 | This is how you can run your own Blind-ROP attack and alert it in PagerDuty: 58 | 59 | 1. Go to this URL: 60 | [https://polyverse.com/learn/blind-rop/](https://polyverse.com/learn/blind-rop/) 61 | 62 | 2. Press “Start Scenario”: 63 | ![PagerDuty Demo Step 2](pagerduty-demo-step-2.png) 64 | 65 | 3. Run the install command: `curl -s -L https://github.com/polyverse/zerotect/releases/latest/download/install.sh | sudo sh -s -- --pagerduty ` 66 | ![PagerDuty Demo Step 3](pagerduty-demo-step-3.png) 67 | 68 | 4. Run: `systemctl restart zerotect` 69 | ![PagerDuty Demo Step 4](pagerduty-demo-step-4.png) 70 | 71 | 5. Then start a vulnerable nginx by clicking on that gray text on the left side: 72 | ![PagerDuty Demo Step 5](pagerduty-demo-step-5.png) 73 | 74 | 6. Scroll down and press continue… 75 | ![PagerDuty Demo Step 6](pagerduty-demo-step-6.png) 76 | 77 | 7. Start the attack by clicking on this gray area on the left 78 | ![PagerDuty Demo Step 7](pagerduty-demo-step-7.png) 79 | 80 | 8. Watch the attack happen... 81 | ![PagerDuty Demo Step 8](pagerduty-demo-step-8.png) 82 | 83 | And at this point just wait…. And the attack will generate alerts. 84 | 85 | Every once in a while step 2 will say, “Not Vulnerable”; if that happens, just repeat the steps. 
86 | -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-2.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-3.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-4.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-5.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-6.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-7.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-7.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-demo-step-8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-demo-step-8.png -------------------------------------------------------------------------------- /integrations/PagerDuty/pagerduty-integration-key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyverse/zerotect/13bab3ecaa9f9ef918d152f6ccef79650d6e8a28/integrations/PagerDuty/pagerduty-integration-key.png -------------------------------------------------------------------------------- /reference/how_zerotect_works.drawio: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /reference/schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "title": "Version", 4 | "description": "Event is the complete structure that Polycorder (Polyverse-hosted zero-day detection service) understands. 
This structure is also the reference schema/format for all detect-efforts.\n\nAs such, it is encouraged to have many detectors that emit data in this structure.\n\nDifferent implementations of the structure may very. Various fields may come or go.\n\nAll parsers are encouraged to first test the \"Version\" field and then parse the correct structure. The field `version` is guaranteed to exist on ALL versions and instances of Event. Any structure/data that does not contain the version field, is considered invalid.", 5 | "anyOf": [ 6 | { 7 | "description": "Version is guaranteed to exist. All other fields may change or not exist, and it is recommended to use a different version when making breaking changes to all other fields. It allows parsers to test on version and determine if they know what to do with the rest. For this particular variant, set DeviceVersion to a fixed value \"V1\"", 8 | "type": "object", 9 | "required": [ 10 | "event", 11 | "timestamp", 12 | "version" 13 | ], 14 | "properties": { 15 | "event": { 16 | "description": "Platform records fields specific to a specific mechanism/platform.", 17 | "allOf": [ 18 | { 19 | "$ref": "#/definitions/EventType" 20 | } 21 | ] 22 | }, 23 | "hostname": { 24 | "type": [ 25 | "string", 26 | "null" 27 | ] 28 | }, 29 | "timestamp": { 30 | "description": "This is universal and important for all events. 
They occur at a time.", 31 | "type": "string", 32 | "format": "date-time" 33 | }, 34 | "version": { 35 | "type": "string", 36 | "enum": [ 37 | "V1" 38 | ] 39 | } 40 | } 41 | } 42 | ], 43 | "definitions": { 44 | "EventType": { 45 | "description": "The Platform this event originated on.", 46 | "anyOf": [ 47 | { 48 | "description": "An analytics-detected internal event based on other events", 49 | "type": "object", 50 | "required": [ 51 | "justification", 52 | "message", 53 | "procname", 54 | "register", 55 | "type" 56 | ], 57 | "properties": { 58 | "justification": { 59 | "description": "The raw events which justify this analytics event.", 60 | "allOf": [ 61 | { 62 | "$ref": "#/definitions/RegisterProbeJustification" 63 | } 64 | ] 65 | }, 66 | "message": { 67 | "description": "What does this probe mean? What interpretation could this particular register probe have?", 68 | "type": "string" 69 | }, 70 | "procname": { 71 | "type": "string" 72 | }, 73 | "register": { 74 | "description": "Which register was being probed?", 75 | "type": "string" 76 | }, 77 | "type": { 78 | "type": "string", 79 | "enum": [ 80 | "RegisterProbe" 81 | ] 82 | } 83 | } 84 | }, 85 | { 86 | "description": "The Linux platform and event details in the Linux context A Kernel Trap event - the kernel stops process execution for attempting something stupid", 87 | "type": "object", 88 | "required": [ 89 | "errcode", 90 | "facility", 91 | "ip", 92 | "level", 93 | "pid", 94 | "procname", 95 | "sp", 96 | "trap", 97 | "type" 98 | ], 99 | "properties": { 100 | "errcode": { 101 | "description": "The error code for the trap", 102 | "allOf": [ 103 | { 104 | "$ref": "#/definitions/SegfaultErrorCode" 105 | } 106 | ] 107 | }, 108 | "facility": { 109 | "description": "A Log-facility - most OSes would have one, but this is Linux-specific for now", 110 | "allOf": [ 111 | { 112 | "$ref": "#/definitions/LogFacility" 113 | } 114 | ] 115 | }, 116 | "file": { 117 | "description": "(Optional) File in which the trap occurred 
(could be the main executable or library).", 118 | "type": [ 119 | "string", 120 | "null" 121 | ] 122 | }, 123 | "ip": { 124 | "description": "Instruction Pointer (what memory address was executing)", 125 | "type": "integer", 126 | "format": "uint", 127 | "minimum": 0.0 128 | }, 129 | "level": { 130 | "description": "The type of kernel trap triggered A Log-level for this event - was it critical?", 131 | "allOf": [ 132 | { 133 | "$ref": "#/definitions/LogLevel" 134 | } 135 | ] 136 | }, 137 | "pid": { 138 | "description": "Process ID", 139 | "type": "integer", 140 | "format": "uint", 141 | "minimum": 0.0 142 | }, 143 | "procname": { 144 | "description": "Name of the process in which the trap occurred", 145 | "type": "string" 146 | }, 147 | "sp": { 148 | "description": "Stack Pointer", 149 | "type": "integer", 150 | "format": "uint", 151 | "minimum": 0.0 152 | }, 153 | "trap": { 154 | "$ref": "#/definitions/KernelTrapType" 155 | }, 156 | "type": { 157 | "type": "string", 158 | "enum": [ 159 | "LinuxKernelTrap" 160 | ] 161 | }, 162 | "vmasize": { 163 | "description": "(Optional) The Virtual Memory Size of this file's mapping.", 164 | "type": [ 165 | "integer", 166 | "null" 167 | ], 168 | "format": "uint", 169 | "minimum": 0.0 170 | }, 171 | "vmastart": { 172 | "description": "(Optional) The Virtual Memory Address where this file (main executable or library) was mapped (with ASLR could be arbitrary).", 173 | "type": [ 174 | "integer", 175 | "null" 176 | ], 177 | "format": "uint", 178 | "minimum": 0.0 179 | } 180 | } 181 | }, 182 | { 183 | "description": "A Fatal Signal from the process because the process did something stupid", 184 | "type": "object", 185 | "required": [ 186 | "facility", 187 | "level", 188 | "signal", 189 | "stack_dump", 190 | "type" 191 | ], 192 | "properties": { 193 | "facility": { 194 | "description": "A Log-facility - most OSes would have one, but this is Linux-specific for now", 195 | "allOf": [ 196 | { 197 | "$ref": "#/definitions/LogFacility" 
198 | } 199 | ] 200 | }, 201 | "level": { 202 | "description": "A Log-level for this event - was it critical?", 203 | "allOf": [ 204 | { 205 | "$ref": "#/definitions/LogLevel" 206 | } 207 | ] 208 | }, 209 | "signal": { 210 | "description": "The type of Fatal triggered", 211 | "allOf": [ 212 | { 213 | "$ref": "#/definitions/FatalSignalType" 214 | } 215 | ] 216 | }, 217 | "stack_dump": { 218 | "description": "An Optional Stack Dump if one was found and parsable. Do not place these in CEF format since ArcSight/Microfocus needs explicit field mappings. No telling what a real dump of registers/values might be contained here. Best to be safe. If you care about these values, use JSON/Text logging.", 219 | "type": "object", 220 | "additionalProperties": { 221 | "type": "string" 222 | } 223 | }, 224 | "type": { 225 | "type": "string", 226 | "enum": [ 227 | "LinuxFatalSignal" 228 | ] 229 | } 230 | } 231 | }, 232 | { 233 | "description": "Information about a suppressed callback i.e. when a particular type of error happens so much it is suppressed 'n' times.\n\nThis captures what the log was, and how many times it was suppressed.\n\nThis is a crucial data point because under Blind ROP attacks an error might happen thousands of times but may only be logged once, with all the remaining attempts being suppressed.", 234 | "type": "object", 235 | "required": [ 236 | "count", 237 | "facility", 238 | "function_name", 239 | "level", 240 | "type" 241 | ], 242 | "properties": { 243 | "count": { 244 | "description": "Number of times it was suppressed.", 245 | "type": "integer", 246 | "format": "uint", 247 | "minimum": 0.0 248 | }, 249 | "facility": { 250 | "description": "A Log-facility - most OSes would have one, but this is Linux-specific for now", 251 | "allOf": [ 252 | { 253 | "$ref": "#/definitions/LogFacility" 254 | } 255 | ] 256 | }, 257 | "function_name": { 258 | "description": "Name of the function being suppressed/folded.", 259 | "type": "string" 260 | }, 261 | "level": { 262 | 
"description": "A Log-level for this event - was it critical?", 263 | "allOf": [ 264 | { 265 | "$ref": "#/definitions/LogLevel" 266 | } 267 | ] 268 | }, 269 | "type": { 270 | "type": "string", 271 | "enum": [ 272 | "LinuxSuppressedCallback" 273 | ] 274 | } 275 | } 276 | }, 277 | { 278 | "description": "This is a zerotect-internal event. zerotect can be commanded to set and ensure certain configuration settings to capture events, such as enabling kernel fatal-signals, or core dumps.\n\nThis event is triggered when, after zerotect has configured a machine as commanded, the configuration later mismatched. It means someone attempted to undo those changes.\n\nThis event usually tells an observer they may not be seeing other events because they may be disabled.", 279 | "type": "object", 280 | "required": [ 281 | "expected_value", 282 | "key", 283 | "observed_value", 284 | "type" 285 | ], 286 | "properties": { 287 | "expected_value": { 288 | "description": "The value zerotect configured and thus expected.", 289 | "type": "string" 290 | }, 291 | "key": { 292 | "description": "The key in question whose values mismatched.", 293 | "type": "string" 294 | }, 295 | "observed_value": { 296 | "description": "The value zerotect observed.", 297 | "type": "string" 298 | }, 299 | "type": { 300 | "type": "string", 301 | "enum": [ 302 | "ConfigMismatch" 303 | ] 304 | } 305 | } 306 | } 307 | ] 308 | }, 309 | "FatalSignalType": { 310 | "description": "The type of Fatal Signal detected Comprehensive list of POSIX signals in the linux kernel can be found int he kernel source tree: https://github.com/torvalds/linux/blob/master/include/linux/signal.h#L339\n\nA bit more detail may be found in the man-pages: http://man7.org/linux/man-pages/man7/signal.7.html", 311 | "type": "string", 312 | "enum": [ 313 | "SIGHUP", 314 | "SIGINT", 315 | "SIGQUIT", 316 | "SIGILL", 317 | "SIGTRAP", 318 | "SIGIOT", 319 | "SIGBUS", 320 | "SIGFPE", 321 | "SIGKILL", 322 | "SIGUSR1", 323 | "SIGSEGV", 324 | "SIGUSR2", 
325 | "SIGPIPE", 326 | "SIGALRM", 327 | "SIGTERM", 328 | "SIGSTKFLT", 329 | "SIGCHLD", 330 | "SIGCONT", 331 | "SIGSTOP", 332 | "SIGTSTP", 333 | "SIGTTIN", 334 | "SIGTTOU", 335 | "SIGURG", 336 | "SIGXCPU", 337 | "SIGXFSZ", 338 | "SIGVTALRM", 339 | "SIGPROF", 340 | "SIGWINCH", 341 | "SIGIO", 342 | "SIGPWR" 343 | ] 344 | }, 345 | "KernelTrapType": { 346 | "description": "The types of kernel traps understood", 347 | "anyOf": [ 348 | { 349 | "description": "This is type zerotect doesn't know how to parse. So it captures and stores the string description.", 350 | "type": "object", 351 | "required": [ 352 | "description", 353 | "type" 354 | ], 355 | "properties": { 356 | "description": { 357 | "type": "string" 358 | }, 359 | "type": { 360 | "type": "string", 361 | "enum": [ 362 | "Generic" 363 | ] 364 | } 365 | } 366 | }, 367 | { 368 | "description": "Segfault occurs when an invalid memory access is performed (writing to read-only memory, executing non-executable memory, etc.)", 369 | "type": "object", 370 | "required": [ 371 | "location", 372 | "type" 373 | ], 374 | "properties": { 375 | "location": { 376 | "type": "integer", 377 | "format": "uint", 378 | "minimum": 0.0 379 | }, 380 | "type": { 381 | "type": "string", 382 | "enum": [ 383 | "Segfault" 384 | ] 385 | } 386 | } 387 | }, 388 | { 389 | "description": "Invalid Opcode occurs when the processor doesn't understand an opcode. 
This usually occurs when execution jumps to an otherwise data segment, or in the wrong byte within an instruction.", 390 | "type": "object", 391 | "required": [ 392 | "type" 393 | ], 394 | "properties": { 395 | "type": { 396 | "type": "string", 397 | "enum": [ 398 | "InvalidOpcode" 399 | ] 400 | } 401 | } 402 | }, 403 | { 404 | "type": "object", 405 | "required": [ 406 | "type" 407 | ], 408 | "properties": { 409 | "type": { 410 | "type": "string", 411 | "enum": [ 412 | "GeneralProtectionFault" 413 | ] 414 | } 415 | } 416 | } 417 | ] 418 | }, 419 | "LogFacility": { 420 | "description": "Linux kmesg (kernel message buffer) Log Facility.", 421 | "type": "string", 422 | "enum": [ 423 | "Kern", 424 | "User", 425 | "Mail", 426 | "Daemon", 427 | "Auth", 428 | "Syslog", 429 | "Lpr", 430 | "News", 431 | "UUCP", 432 | "Cron", 433 | "AuthPriv", 434 | "FTP" 435 | ] 436 | }, 437 | "LogLevel": { 438 | "description": "Linux kmesg (kernel message buffer) Log Level.", 439 | "type": "string", 440 | "enum": [ 441 | "Emergency", 442 | "Alert", 443 | "Critical", 444 | "Error", 445 | "Warning", 446 | "Notice", 447 | "Info", 448 | "Debug" 449 | ] 450 | }, 451 | "RegisterProbeJustification": { 452 | "anyOf": [ 453 | { 454 | "type": "object", 455 | "required": [ 456 | "FullEvents" 457 | ], 458 | "properties": { 459 | "FullEvents": { 460 | "type": "array", 461 | "items": { 462 | "$ref": "#/definitions/Version" 463 | } 464 | } 465 | } 466 | }, 467 | { 468 | "type": "object", 469 | "required": [ 470 | "RegisterValues" 471 | ], 472 | "properties": { 473 | "RegisterValues": { 474 | "type": "array", 475 | "items": { 476 | "type": "string" 477 | } 478 | } 479 | } 480 | }, 481 | { 482 | "type": "object", 483 | "required": [ 484 | "EventCount" 485 | ], 486 | "properties": { 487 | "EventCount": { 488 | "type": "integer", 489 | "format": "uint", 490 | "minimum": 0.0 491 | } 492 | } 493 | } 494 | ] 495 | }, 496 | "SegfaultAccessMode": { 497 | "description": "The context under which the Segmentation 
Fault was triggered", 498 | "type": "string", 499 | "enum": [ 500 | "Kernel", 501 | "User" 502 | ] 503 | }, 504 | "SegfaultAccessType": { 505 | "description": "The type of Access that triggered this Segmentation Fault", 506 | "type": "string", 507 | "enum": [ 508 | "Read", 509 | "Write" 510 | ] 511 | }, 512 | "SegfaultErrorCode": { 513 | "description": "Segmentation Fault ErrorCode flags parsed into a structure See more: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/include/asm/traps.h#n167 See more: https://utcc.utoronto.ca/~cks/space/blog/linux/KernelSegfaultMessageMeaning", 514 | "type": "object", 515 | "required": [ 516 | "access_mode", 517 | "access_type", 518 | "instruction_fetch", 519 | "protection_keys_block_access", 520 | "reason", 521 | "use_of_reserved_bit" 522 | ], 523 | "properties": { 524 | "access_mode": { 525 | "description": "The mode under which access was performed", 526 | "allOf": [ 527 | { 528 | "$ref": "#/definitions/SegfaultAccessMode" 529 | } 530 | ] 531 | }, 532 | "access_type": { 533 | "description": "The type of access causing the fault", 534 | "allOf": [ 535 | { 536 | "$ref": "#/definitions/SegfaultAccessType" 537 | } 538 | ] 539 | }, 540 | "instruction_fetch": { 541 | "description": "fault was an instruction fetch, not data read or write", 542 | "type": "boolean" 543 | }, 544 | "protection_keys_block_access": { 545 | "description": "Memory Protection Keys related. Not sure what exactly triggers this. 
See more: https://lore.kernel.org/patchwork/patch/633070/", 546 | "type": "boolean" 547 | }, 548 | "reason": { 549 | "description": "The reason for the segmentation fault", 550 | "allOf": [ 551 | { 552 | "$ref": "#/definitions/SegfaultReason" 553 | } 554 | ] 555 | }, 556 | "use_of_reserved_bit": { 557 | "description": "use of reserved bits in the page table entry detected (the kernel will panic if this happens)", 558 | "type": "boolean" 559 | } 560 | } 561 | }, 562 | "SegfaultReason": { 563 | "description": "The reason for the Segmentation Fault", 564 | "type": "string", 565 | "enum": [ 566 | "NoPageFound", 567 | "ProtectionFault" 568 | ] 569 | }, 570 | "Version": { 571 | "description": "Event is the complete structure that Polycorder (Polyverse-hosted zero-day detection service) understands. This structure is also the reference schema/format for all detect-efforts.\n\nAs such, it is encouraged to have many detectors that emit data in this structure.\n\nDifferent implementations of the structure may very. Various fields may come or go.\n\nAll parsers are encouraged to first test the \"Version\" field and then parse the correct structure. The field `version` is guaranteed to exist on ALL versions and instances of Event. Any structure/data that does not contain the version field, is considered invalid.", 572 | "anyOf": [ 573 | { 574 | "description": "Version is guaranteed to exist. All other fields may change or not exist, and it is recommended to use a different version when making breaking changes to all other fields. It allows parsers to test on version and determine if they know what to do with the rest. 
For this particular variant, set DeviceVersion to a fixed value \"V1\"", 575 | "type": "object", 576 | "required": [ 577 | "event", 578 | "timestamp", 579 | "version" 580 | ], 581 | "properties": { 582 | "event": { 583 | "description": "Platform records fields specific to a specific mechanism/platform.", 584 | "allOf": [ 585 | { 586 | "$ref": "#/definitions/EventType" 587 | } 588 | ] 589 | }, 590 | "hostname": { 591 | "type": [ 592 | "string", 593 | "null" 594 | ] 595 | }, 596 | "timestamp": { 597 | "description": "This is universal and important for all events. They occur at a time.", 598 | "type": "string", 599 | "format": "date-time" 600 | }, 601 | "version": { 602 | "type": "string", 603 | "enum": [ 604 | "V1" 605 | ] 606 | } 607 | } 608 | } 609 | ] 610 | } 611 | } 612 | } -------------------------------------------------------------------------------- /reference/zerotect.toml: -------------------------------------------------------------------------------- 1 | verbosity = 0 2 | hostname = 'hostname' 3 | pagerduty_routing_key = 'routing_key' 4 | 5 | [auto_configure] 6 | exception_trace = true 7 | fatal_signals = true 8 | klog_include_timestamp = true 9 | 10 | [analytics] 11 | mode = 'Passthrough' 12 | justification = 'Summary' 13 | collection_timeout_seconds = 10 14 | max_event_count = 20 15 | event_lifetime_seconds = 30 16 | event_drop_count = 5 17 | 18 | [monitor] 19 | gobble_old_events = false 20 | 21 | [console] 22 | format = 'Text' 23 | 24 | [polycorder] 25 | auth_key = 'AuthKeyFromPolyverseAccountManager' 26 | node_id = 'UsefulNodeIdentifierToGroupEvents' 27 | flush_event_count = 10 28 | flush_timeout_seconds = 10 29 | 30 | [syslog] 31 | format = 'Cef' 32 | destination = 'Udp' 33 | path = '# only applicable to unix - path to unix socket to connect to syslog (i.e. /dev/log or /var/run/syslog)' 34 | server = '# applicable to tcp and udp - the host:port to send syslog to (i.e. 
127.0.0.1:601 or 127.0.0.1:514)' 35 | local = '# only applicable to udp - the host:port to bind sender to (i.e. 127.0.0.1:0)' 36 | 37 | [logfile] 38 | format = 'Cef' 39 | filepath = '/test/path' 40 | rotation_file_count = 1 41 | rotation_file_max_size = 20 42 | -------------------------------------------------------------------------------- /src/analyzer/close_by_ip_detect.rs: -------------------------------------------------------------------------------- 1 | use crate::common; 2 | use crate::events; 3 | use crate::params; 4 | 5 | use std::collections::VecDeque; 6 | use std::rc::Rc; 7 | use time::OffsetDateTime; 8 | 9 | pub fn close_by_ip_detect( 10 | procname: &str, 11 | eventslist: &VecDeque<(OffsetDateTime, events::Event)>, 12 | ip_max_distance: usize, 13 | justification_threshold: usize, 14 | justification_kind: params::DetectedEventJustification, 15 | ) -> Option<(events::Event, Vec)> { 16 | // collect events with close-IPs (Instruction Pointer) 17 | let mut close_by_ip: Vec = vec![]; 18 | 19 | // go over the event list and calculate ip diffs 20 | // a primitive sliding-window for events 21 | let mut prev_added: bool = false; 22 | let mut maybe_prev_event: Option<&events::Event> = None; 23 | for (_, event) in eventslist.iter() { 24 | if let events::Version::V1 { 25 | timestamp: _, 26 | hostname: _, 27 | event: events::EventType::LinuxKernelTrap(lkt), 28 | } = event.as_ref() 29 | { 30 | if let Some(events::Version::V1 { 31 | timestamp: _, 32 | hostname: _, 33 | event: events::EventType::LinuxKernelTrap(prev_lkt), 34 | }) = maybe_prev_event.map(|x| &(**x)) 35 | { 36 | // analytics only works if there is a prevous event 37 | let ad = common::abs_diff(prev_lkt.ip, lkt.ip); 38 | 39 | // we have winner events 40 | // ignore when IP is identical across events - it may just be a legit crash. 
41 | if ad != 0 && ad <= ip_max_distance { 42 | if !prev_added { 43 | // if close_by_ip is empty, add the previous event too 44 | // we can unwrap safely - we're already inside a destructure of it 45 | close_by_ip.push(maybe_prev_event.unwrap().clone()) 46 | } 47 | prev_added = true; 48 | close_by_ip.push(event.clone()); 49 | } else { 50 | prev_added = false; 51 | } 52 | } 53 | 54 | // Make current event the previous event 55 | maybe_prev_event = Some(event); 56 | } 57 | } 58 | 59 | // if we found a sufficient number of close_by_ip events (i.e. 2 or more), we detect an event 60 | if close_by_ip.len() > justification_threshold { 61 | return Some(( 62 | Rc::new(events::Version::V1 { 63 | timestamp: OffsetDateTime::now_utc(), 64 | hostname: common::get_first_event_hostname(&close_by_ip), 65 | event: events::EventType::RegisterProbe(events::RegisterProbe { 66 | register: "ip".to_owned(), 67 | message: "Instruction Pointer probe".to_owned(), 68 | procname: procname.to_owned(), 69 | justification: justify(close_by_ip.clone(), justification_kind), 70 | }), 71 | }), 72 | close_by_ip, 73 | )); 74 | } 75 | 76 | None 77 | } 78 | 79 | fn justify( 80 | justifying_events: Vec, 81 | justification_kind: params::DetectedEventJustification, 82 | ) -> events::RegisterProbeJustification { 83 | match justification_kind { 84 | params::DetectedEventJustification::Full => events::RegisterProbeJustification::FullEvents( 85 | justifying_events, 86 | ), 87 | params::DetectedEventJustification::Summary => events::RegisterProbeJustification::RegisterValues( 88 | justifying_events.iter().filter_map(|e| { 89 | match e.as_ref() { 90 | events::Version::V1 { 91 | timestamp: _, 92 | hostname: _, 93 | event: events::EventType::LinuxKernelTrap(lkt), 94 | } => Some(format!("{}", lkt.ip)), 95 | 96 | _ => { 97 | eprintln!("Analyzer:: close_by_ip_detect::justify: Unsupported event found when summarizing: {}", e); 98 | None 99 | }, 100 | } 101 | }).collect(), 102 | ), 103 | 
params::DetectedEventJustification::None => events::RegisterProbeJustification::EventCount(justifying_events.len()), 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/analyzer/close_by_register_detect.rs: -------------------------------------------------------------------------------- 1 | use crate::common; 2 | use crate::events; 3 | use crate::params; 4 | 5 | use std::collections::VecDeque; 6 | use std::rc::Rc; 7 | use time::OffsetDateTime; 8 | 9 | pub fn close_by_register_detect( 10 | procname: &str, 11 | eventslist: &VecDeque<(OffsetDateTime, events::Event)>, 12 | register: &str, 13 | register_max_distance: usize, 14 | justification_threshold: usize, 15 | justification_kind: params::DetectedEventJustification, 16 | message: &str, 17 | ) -> Option<(events::Event, Vec)> { 18 | // collect events with close-IPs (Instruction Pointer) 19 | let mut close_by_register: Vec = vec![]; 20 | 21 | // go over the event list and calculate ip diffs 22 | // a primitive sliding-window for events 23 | let mut prev_added: bool = false; 24 | let mut maybe_prev_event: Option<&events::Event> = None; 25 | for (_, event) in eventslist.iter() { 26 | if let events::Version::V1 { 27 | timestamp: _, 28 | hostname: _, 29 | event: events::EventType::LinuxFatalSignal(lfs), 30 | } = event.as_ref() 31 | { 32 | if let Some(events::Version::V1 { 33 | timestamp: _, 34 | hostname: _, 35 | event: events::EventType::LinuxFatalSignal(prev_lfs), 36 | }) = maybe_prev_event.map(|x| &(**x)) 37 | { 38 | if let (Some(prev_register_val), Some(register_val)) = ( 39 | prev_lfs 40 | .stack_dump 41 | .get(register) 42 | .map(|v| common::parse_hex::(v)) 43 | .flatten(), 44 | lfs.stack_dump 45 | .get(register) 46 | .map(|v| common::parse_hex::(v)) 47 | .flatten(), 48 | ) { 49 | // analytics only works if there is a prevous event 50 | let ad = common::abs_diff(prev_register_val, register_val); 51 | 52 | // we have winner events 53 | // ignore when IP is 
identical across events - it may just be a legit crash. 54 | if ad != 0 && ad <= register_max_distance { 55 | if !prev_added { 56 | // if close_by_register is empty, add the previous event too 57 | // we can unwrap safely - we're already inside a destructure of it 58 | close_by_register.push(maybe_prev_event.unwrap().clone()) 59 | } 60 | prev_added = true; 61 | close_by_register.push(event.clone()); 62 | } else { 63 | prev_added = false; 64 | } 65 | } 66 | } 67 | 68 | // Make current event the previous event 69 | maybe_prev_event = Some(event); 70 | } 71 | } 72 | 73 | // if we found a sufficient number of close_by_register events (i.e. 2 or more), we detect an event 74 | if close_by_register.len() > justification_threshold { 75 | return Some(( 76 | Rc::new(events::Version::V1 { 77 | timestamp: OffsetDateTime::now_utc(), 78 | hostname: common::get_first_event_hostname(&close_by_register), 79 | event: events::EventType::RegisterProbe(events::RegisterProbe { 80 | register: register.to_owned(), 81 | message: message.to_owned(), 82 | procname: procname.to_owned(), 83 | justification: justify(close_by_register.clone(), register, justification_kind), 84 | }), 85 | }), 86 | close_by_register, 87 | )); 88 | } 89 | 90 | None 91 | } 92 | 93 | fn justify( 94 | justifying_events: Vec, 95 | register: &str, 96 | justification_kind: params::DetectedEventJustification, 97 | ) -> events::RegisterProbeJustification { 98 | match justification_kind { 99 | params::DetectedEventJustification::Full => events::RegisterProbeJustification::FullEvents( 100 | justifying_events, 101 | ), 102 | params::DetectedEventJustification::Summary => events::RegisterProbeJustification::RegisterValues( 103 | justifying_events.iter().filter_map(|e| { 104 | match e.as_ref() { 105 | events::Version::V1 { 106 | timestamp: _, 107 | hostname: _, 108 | event: events::EventType::LinuxFatalSignal(lfs), 109 | } => lfs.stack_dump.get(register).cloned(), 110 | 111 | _ => { 112 | eprintln!("Analyzer::
close_by_register_detect::justify: Unsupported event found when summarizing: {}", e); 113 | None 114 | }, 115 | } 116 | }).collect(), 117 | ), 118 | params::DetectedEventJustification::None => events::RegisterProbeJustification::EventCount(justifying_events.len()), 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/analyzer/eventbuffer.rs: -------------------------------------------------------------------------------- 1 | use crate::events; 2 | 3 | use std::collections::hash_map::IterMut; 4 | use std::collections::{BTreeMap, HashMap, VecDeque}; 5 | use std::time::Duration; 6 | use time::OffsetDateTime; 7 | 8 | type TimestampedEvent = (OffsetDateTime, events::Event); 9 | pub type TimestampedEventList = VecDeque; 10 | type ProcNameToTimestampedEventsMap = HashMap; 11 | 12 | /// A Hash(procname)->List(events) so we can look for closely-spaced events in the same procname 13 | /// NOT threadsafe!! Please only operate (insert, remove, analyze, etc.) from a single thread. 14 | pub struct EventBuffer { 15 | _verbosity: u8, 16 | list_capacity: usize, 17 | 18 | max_event_count: usize, 19 | event_lifetime: Duration, 20 | event_drop_count: usize, 21 | 22 | cached_len: usize, 23 | 24 | // HashMap can store references that don't outlive the hashlist 25 | hashlist: ProcNameToTimestampedEventsMap, 26 | } 27 | 28 | /// Primary structure to buffer events as they come in. They're stored against procname, 29 | /// since an attack may be against a particular process. 
30 | /// 31 | impl EventBuffer { 32 | pub fn new( 33 | verbosity: u8, 34 | max_event_count: usize, 35 | event_drop_count: usize, 36 | event_lifetime: Duration, 37 | ) -> EventBuffer { 38 | EventBuffer { 39 | _verbosity: verbosity, 40 | list_capacity: max_event_count, 41 | max_event_count, 42 | event_lifetime, 43 | event_drop_count, 44 | cached_len: 0, 45 | hashlist: ProcNameToTimestampedEventsMap::with_capacity(max_event_count), 46 | } 47 | } 48 | 49 | pub fn iter_mut(&mut self) -> IterMut<'_, String, VecDeque<(OffsetDateTime, events::Event)>> { 50 | self.hashlist.iter_mut() 51 | } 52 | 53 | pub fn is_full(&self) -> bool { 54 | self.len() >= self.max_event_count 55 | } 56 | 57 | pub fn is_empty(&self) -> bool { 58 | self.len() == 0 59 | } 60 | 61 | pub fn len(&self) -> usize { 62 | self.cached_len 63 | } 64 | 65 | fn recompute_len(&mut self) -> usize { 66 | let mut total_len: usize = 0; 67 | for (_, list) in self.hashlist.iter() { 68 | total_len += list.len() 69 | } 70 | 71 | self.cached_len = total_len; 72 | 73 | total_len 74 | } 75 | 76 | pub fn insert(&mut self, timestamp: OffsetDateTime, procname: String, event: events::Event) { 77 | // so we don't double-borrow self 78 | let list_capacity = self.list_capacity; 79 | 80 | // Do we have a list for this proc? 81 | self.hashlist 82 | .entry(procname) 83 | .or_insert_with(|| TimestampedEventList::with_capacity(list_capacity)) 84 | .push_back((timestamp, event)); 85 | 86 | self.cached_len += 1; 87 | } 88 | 89 | /// Remove events older than allowed lifetime 90 | /// If total events count (i.e. cached_len) is greater than or 91 | /// equal to maximum allowed events, remove the oldest 'drop_event_count' events. 92 | pub fn cleanup(&mut self) -> usize { 93 | self.remove_expired_events(); 94 | self.recompute_len(); 95 | 96 | // fullness tells us when events are where they should be, 97 | // cleanup happens when they exceed fullness (hence > vs >=). 
98 | if self.is_full() { 99 | self.drop_oldest_events(); 100 | } 101 | 102 | // remove procnames whose lists are empty 103 | self.drop_empty_procs(); 104 | 105 | self.recompute_len() 106 | } 107 | 108 | /// Drop 'drop_event_count' number of oldest events across all lists 109 | /// this is highly inefficient (iterating the hashmap multiple times) 110 | /// but we'll make it efficient after we make it work 111 | fn drop_oldest_events(&mut self) { 112 | // if there's only one list, optimize by draining events from it 113 | // and returning early 114 | if self.hashlist.len() == 1 { 115 | let (_, eventlist) = self.hashlist.iter_mut().next().unwrap(); 116 | eventlist.drain(0..self.event_drop_count); 117 | return; 118 | } 119 | 120 | let mut priority_removal_map = BTreeMap::::new(); 121 | 122 | // populate the removal map with timestamp -> procname (the oldest timestamp in each procname) 123 | for (procname, eventlist) in self.hashlist.iter() { 124 | if let Some((timestamp, _)) = eventlist.iter().next() { 125 | priority_removal_map.insert(*timestamp, procname.clone()); 126 | } 127 | } 128 | 129 | // while we have events left to be dropped... 130 | let mut events_remaining_to_drop = self.event_drop_count; 131 | while events_remaining_to_drop > 0 && !priority_removal_map.is_empty() { 132 | // May become more efficient in the future. See: https://github.com/rust-lang/rust/issues/62924 133 | // Find procname having oldest event (since we've got oldest events by procname in BTreeMap) 134 | // Since BTreeMap is sorted ascending by keys, the oldest (i.e. lowest) datetime key will 135 | come first.
136 | let (borrowed_timestamp, borrowed_procname) = 137 | priority_removal_map.iter().next().unwrap(); 138 | // detach these from the iterator, and thus drop scope of the priority_removal_map borrow 139 | let (timestamp, procname) = (*borrowed_timestamp, borrowed_procname.clone()); 140 | priority_removal_map.remove(&timestamp); 141 | 142 | // look up event list for that oldest timestamped event 143 | match self.hashlist.get_mut(&procname) { 144 | None => {} 145 | Some(eventlist) => { 146 | // remove the front-most (oldest event) 147 | eventlist.pop_front(); 148 | events_remaining_to_drop -= 1; 149 | 150 | // add event from front (if any) to the BTreeMap priority list 151 | match eventlist.front() { 152 | None => {} 153 | Some((timestamp, _)) => { 154 | priority_removal_map.insert(*timestamp, procname); 155 | } 156 | } 157 | } 158 | } 159 | } 160 | } 161 | 162 | /// Remove events that are past lifetime. This one is easier. 163 | fn remove_expired_events(&mut self) { 164 | // At what time do events expire? 165 | // make this mutable so comparison below works 166 | let mut event_expiry_time = OffsetDateTime::now_utc() - self.event_lifetime; 167 | 168 | // for each procname in event list 169 | for (_, eventlist) in (&mut self.hashlist).iter_mut() { 170 | // let's go in the event list oldest to youngest and remove expired events... 171 | // iterating over VecDeque goes front to back.
172 | // https://doc.rust-lang.org/std/collections/struct.VecDeque.html 173 | let mut removal_count: usize = 0; 174 | for (timestamp, _) in eventlist.iter() { 175 | if timestamp <= &mut event_expiry_time { 176 | // if current event has expired, remove another event from the front 177 | removal_count += 1; 178 | } else { 179 | // if we got to events which are newer than expiry, we break the loop 180 | // since events go oldest->youngest from front to back 181 | break; 182 | } 183 | } 184 | 185 | eventlist.drain(0..removal_count); 186 | } 187 | } 188 | 189 | /// Remove procname keys for which lists are empty. 190 | fn drop_empty_procs(&mut self) { 191 | let emptykeys: Vec = self 192 | .hashlist 193 | .iter() 194 | .filter_map(|(key, value)| { 195 | if value.is_empty() { 196 | // separate out of the iterator 197 | Some(key.clone()) 198 | } else { 199 | None 200 | } 201 | }) 202 | .collect(); 203 | 204 | for emptykey in emptykeys { 205 | self.hashlist.remove(&emptykey); 206 | } 207 | } 208 | } 209 | 210 | #[cfg(test)] 211 | mod test { 212 | use super::*; 213 | use rand; 214 | use std::rc::Rc; 215 | use std::thread::sleep; 216 | use std::time::Duration; 217 | 218 | macro_rules! 
map( 219 | { $($key:expr => $value:expr),+ } => { 220 | { 221 | let mut m = ::std::collections::BTreeMap::::new(); 222 | $( 223 | m.insert(String::from($key), String::from($value)); 224 | )+ 225 | m 226 | } 227 | }; 228 | ); 229 | 230 | #[test] 231 | fn ensure_removal_when_beyond_full_single_proc_all_events() { 232 | let mut eb = EventBuffer::new(0, 10, 5, Duration::from_secs(100)); 233 | 234 | // add 10 events 235 | for _ in 0..10 { 236 | let (ts, procname, event) = create_event("Test".to_owned()); 237 | eb.insert(ts, procname, event); 238 | } 239 | assert_eq!(10, eb.len()); 240 | 241 | // add one more event, and lenth should be 5 (drop 5 events) 242 | assert_eq!(5, eb.cleanup()); 243 | 244 | let (ts, procname, event) = create_event("Test".to_owned()); 245 | eb.insert(ts, procname, event); 246 | assert_eq!(6, eb.len()); 247 | assert_eq!(6, eb.cleanup()); 248 | } 249 | 250 | #[test] 251 | fn ensure_removal_when_beyond_full_multiple_procs_single_event() { 252 | let mut eb = EventBuffer::new(0, 10, 5, Duration::from_secs(100)); 253 | 254 | // add 10 events 255 | for i in 0..10 { 256 | let (ts, procname, event) = create_event(format!("TestProc{}", i)); 257 | eb.insert(ts, procname, event); 258 | } 259 | assert_eq!(10, eb.len()); 260 | 261 | // add one more event, and lenth should be 5 (drop 5 events) 262 | assert_eq!(5, eb.cleanup()); 263 | 264 | let (ts, procname, event) = create_event(format!("TestProc{}", 10)); 265 | eb.insert(ts, procname, event); 266 | assert_eq!(6, eb.len()); 267 | assert_eq!(6, eb.cleanup()); 268 | } 269 | 270 | #[test] 271 | fn ensure_removal_when_beyond_full_multiple_procs_multiple_events() { 272 | let mut eb = EventBuffer::new(0, 10, 5, Duration::from_secs(100)); 273 | 274 | // add 10 events 275 | for i in 0..5 { 276 | let (ts, procname, event) = create_event(format!("TestProc{}", i)); 277 | eb.insert(ts, procname, event); 278 | let (ts, procname, event) = create_event(format!("TestProc{}", i)); 279 | eb.insert(ts, procname, event); 280 | } 
281 | assert!(eb.is_full()); 282 | assert_eq!(10, eb.len()); 283 | 284 | // add one more event, and length should be 5 (drop 5 events) 285 | assert_eq!(5, eb.cleanup()); 286 | assert!(!eb.is_full()); 287 | 288 | let (ts, procname, event) = create_event(format!("TestProc{}", 10)); 289 | eb.insert(ts, procname, event); 290 | assert_eq!(6, eb.len()); 291 | assert_eq!(6, eb.cleanup()); 292 | assert!(!eb.is_full()); 293 | 294 | // We expect only oldest events removed - so none of TestProc0-TestProc1, and one of TestProc2. 295 | assert!(eb.hashlist.get("TestProc0").is_none()); 296 | assert!(eb.hashlist.get("TestProc1").is_none()); 297 | 298 | assert_eq!(4, eb.hashlist.len()); 299 | assert_eq!(1, eb.hashlist.get("TestProc2").unwrap().len()); 300 | assert_eq!(2, eb.hashlist.get("TestProc3").unwrap().len()); 301 | assert_eq!(2, eb.hashlist.get("TestProc4").unwrap().len()); 302 | assert_eq!(1, eb.hashlist.get("TestProc10").unwrap().len()); 303 | } 304 | 305 | #[test] 306 | fn ensure_expiry_multiple_procs_multiple_events() { 307 | let mut eb = EventBuffer::new(0, 10, 5, Duration::from_millis(100)); 308 | 309 | // add 9 events 310 | for i in 0..9 { 311 | let (ts, procname, event) = create_event(format!("TestProc{}", i)); 312 | eb.insert(ts, procname, event); 313 | } 314 | assert!(!eb.is_full()); 315 | 316 | //immediate cleanup removes nothing.
317 | assert_eq!(9, eb.cleanup()); 318 | sleep(Duration::from_secs(2)); 319 | 320 | // remove after 2 seconds 321 | assert_eq!(0, eb.cleanup()); 322 | 323 | assert_eq!(0, eb.hashlist.len()); 324 | } 325 | 326 | fn create_event(procname: String) -> (OffsetDateTime, String, events::Event) { 327 | let timestamp = OffsetDateTime::now_utc(); 328 | let event = match rand::random::() { 329 | true => Rc::new(events::Version::V1 { 330 | timestamp, 331 | hostname: Some("analyzerhost".to_owned()), 332 | event: events::EventType::LinuxKernelTrap(events::LinuxKernelTrap { 333 | facility: rmesg::entry::LogFacility::User, 334 | level: rmesg::entry::LogLevel::Info, 335 | procname: procname.clone(), 336 | pid: 1800, 337 | ip: 0x5000, 338 | sp: 0x6000, 339 | trap: events::KernelTrapType::GeneralProtectionFault, 340 | errcode: events::SegfaultErrorCode::from_error_code(6), 341 | file: None, 342 | vmasize: None, 343 | vmastart: None, 344 | }), 345 | }), 346 | false => Rc::new(events::Version::V1 { 347 | timestamp, 348 | hostname: Some("analyzerhost".to_owned()), 349 | event: events::EventType::LinuxFatalSignal(events::LinuxFatalSignal { 350 | facility: rmesg::entry::LogFacility::User, 351 | level: rmesg::entry::LogLevel::Info, 352 | signal: events::FatalSignalType::Iot, 353 | stack_dump: map!("Comm" => procname.clone()), 354 | }), 355 | }), 356 | }; 357 | 358 | (timestamp, procname, event) 359 | } 360 | } 361 | -------------------------------------------------------------------------------- /src/common.rs: -------------------------------------------------------------------------------- 1 | use crate::events; 2 | use num::{Integer, Unsigned}; 3 | use std::any::type_name; 4 | use std::fmt::Display; 5 | use std::str::FromStr; 6 | 7 | pub fn get_first_event_hostname(events: &[events::Event]) -> Option { 8 | events 9 | .get(0) 10 | .map(|e| e.as_ref().get_hostname().to_owned()) 11 | .flatten() 12 | } 13 | 14 | pub fn parse_fragment(frag: &str) -> Option 15 | where 16 | N::Err: Display, 17 
| { 18 | match frag.trim().parse() { 19 | Ok(f) => Some(f), 20 | Err(e) => { 21 | eprintln!("Unable to parse {} into {}: {}", frag, type_name::(), e); 22 | None 23 | } 24 | } 25 | } 26 | 27 | pub fn parse_hex(frag: &str) -> Option 28 | where 29 | N::FromStrRadixErr: Display, 30 | { 31 | // special case 32 | if frag.is_empty() || frag == "(null)" { 33 | return Some(N::zero()); 34 | }; 35 | 36 | // Some register values look like: 0033:0x7f883e3ad43 37 | // only parse the 7f883e3ad43 38 | let sanitized_frag = match frag.find(":0x") { 39 | Some(idx) => &frag[(idx + ":0x".len())..], 40 | None => frag, 41 | }; 42 | 43 | match N::from_str_radix(sanitized_frag.trim(), 16) { 44 | Ok(n) => Some(n), 45 | Err(e) => { 46 | eprintln!("Unable to parse {} into {}: {}", frag, type_name::(), e); 47 | None 48 | } 49 | } 50 | } 51 | 52 | // This will go away after this: https://github.com/rust-lang/rust/issues/62111 53 | pub fn abs_diff(u1: N, u2: N) -> N { 54 | if u1 > u2 { 55 | u1 - u2 56 | } else { 57 | u2 - u1 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/emitter/console.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | use crate::emitter; 4 | use crate::events; 5 | use crate::formatter::new as new_formatter; 6 | use crate::params; 7 | use tokio::sync::broadcast; 8 | 9 | pub async fn emit_forever( 10 | config: params::ConsoleConfig, 11 | mut source: broadcast::Receiver, 12 | ) -> Result<(), emitter::EmitterError> { 13 | let formatter = new_formatter(&config.format); 14 | 15 | loop { 16 | match source.recv().await { 17 | Ok(event) => match formatter.format(&event) { 18 | Ok(formattedstr) => println!("{}", formattedstr), 19 | Err(e) => eprintln!( 20 | "Console Logger: Ignoring error formatting event to {:?}: {}", 21 | config.format, e 22 | ), 23 | }, 24 | Err(broadcast::error::RecvError::Lagged(count)) => eprintln!( 25 | "Console 
emitter is lagging behind generated events. {} events have been dropped.", 26 | count 27 | ), 28 | Err(broadcast::error::RecvError::Closed) => { 29 | eprintln!("Console emitter event source closed. Exiting."); 30 | return Ok(()); 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/emitter/filelogger.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2020 Polyverse Corporation 2 | 3 | use crate::emitter; 4 | use crate::events; 5 | use crate::formatter::new as new_formatter; 6 | use crate::params::LogFileConfig; 7 | use core::pin::Pin; 8 | use file_rotation::asynchronous::{FileRotate, RotationMode}; 9 | use futures::task::{Context, Poll}; 10 | use pin_project::pin_project; 11 | use std::error; 12 | use std::fmt::{Display, Formatter as FmtFormatter, Result as FmtResult}; 13 | use tokio::fs::{File, OpenOptions}; 14 | use tokio::io::{self, AsyncWrite, AsyncWriteExt, ErrorKind}; 15 | use tokio::sync::broadcast; 16 | 17 | #[derive(Debug)] 18 | pub enum FileLoggerError { 19 | MissingParameter(String), 20 | FileSystem(std::io::Error), 21 | FileRotation(file_rotation::error::Error), 22 | } 23 | 24 | impl error::Error for FileLoggerError {} 25 | impl Display for FileLoggerError { 26 | fn fmt(&self, f: &mut FmtFormatter) -> FmtResult { 27 | match self { 28 | Self::MissingParameter(s) => write!(f, "FileLoggerError::MissingParameter: {}", s), 29 | Self::FileSystem(e) => write!(f, "FileLoggerError::FileSystem internal error: {}", e), 30 | Self::FileRotation(e) => { 31 | write!(f, "FileLoggerError::FileRotation internal error: {}", e) 32 | } 33 | } 34 | } 35 | } 36 | 37 | impl std::convert::From for FileLoggerError { 38 | fn from(err: std::io::Error) -> FileLoggerError { 39 | FileLoggerError::FileSystem(err) 40 | } 41 | } 42 | impl From for FileLoggerError { 43 | fn from(err: file_rotation::error::Error) -> FileLoggerError { 44 | 
FileLoggerError::FileRotation(err) 45 | } 46 | } 47 | pub async fn emit_forever( 48 | lfc: LogFileConfig, 49 | source: broadcast::Receiver, 50 | ) -> Result<(), emitter::EmitterError> { 51 | // value in having a local error instead of exposing emitter 52 | // to each implementation's errors 53 | Ok(emit_forever_filelogger_error(lfc, source).await?) 54 | } 55 | 56 | #[pin_project(project = FileLoggerProjection)] 57 | enum FileLogger { 58 | FileRotate(#[pin] FileRotate), 59 | File(#[pin] File), 60 | } 61 | 62 | impl AsyncWrite for FileLogger { 63 | fn poll_write( 64 | self: Pin<&mut Self>, 65 | cx: &mut Context<'_>, 66 | buf: &[u8], 67 | ) -> Poll> { 68 | match self.project() { 69 | FileLoggerProjection::FileRotate(fr) => fr.poll_write(cx, buf), 70 | FileLoggerProjection::File(f) => f.poll_write(cx, buf), 71 | } 72 | } 73 | 74 | // pass flush down to the current file 75 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 76 | match self.project() { 77 | FileLoggerProjection::FileRotate(fr) => fr.poll_flush(cx), 78 | FileLoggerProjection::File(f) => f.poll_flush(cx), 79 | } 80 | } 81 | 82 | // pass shutdown down to the current file 83 | fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 84 | match self.project() { 85 | FileLoggerProjection::FileRotate(fr) => fr.poll_shutdown(cx), 86 | FileLoggerProjection::File(f) => f.poll_shutdown(cx), 87 | } 88 | } 89 | } 90 | 91 | pub async fn emit_forever_filelogger_error( 92 | lfc: LogFileConfig, 93 | mut source: broadcast::Receiver, 94 | ) -> Result<(), FileLoggerError> { 95 | let event_formatter = new_formatter(&lfc.format); 96 | 97 | let mut writer = match lfc.rotation_file_count { 98 | Some(rfc) => match lfc.rotation_file_max_size { 99 | //wrap file in file-rotation 100 | Some(rfms) => FileLogger::FileRotate(FileRotate::new(lfc.filepath, RotationMode::BytesSurpassed(rfms), rfc).await?), 101 | None => return Err(FileLoggerError::MissingParameter("File Logger was provided a 
rotation_file_count parameter, but not a rotation_file_max_size parameter. Without knowing the maximum size of a file at which to rotate to the next one, the rotation count is meaningless.".to_owned())), 102 | }, 103 | None => match lfc.rotation_file_max_size { 104 | Some(_) => return Err(FileLoggerError::MissingParameter("File Logger was provided a rotation_file_max_size parameter, but not a rotation_file_count parameter. Without knowing the number of files to rotate over, the max size is meaningless.".to_owned())), 105 | None => match OpenOptions::new() 106 | .append(true) 107 | .create_new(true) 108 | .open(&lfc.filepath).await 109 | { 110 | Ok(file) => FileLogger::File(file), 111 | Err(err) => match err.kind() { 112 | ErrorKind::AlreadyExists => FileLogger::File(OpenOptions::new().append(true).open(lfc.filepath).await?), 113 | _ => return Err(FileLoggerError::from(err)), 114 | }, 115 | }, 116 | }, 117 | }; 118 | 119 | loop { 120 | match source.recv().await { 121 | Ok(event) => match event_formatter.format(&event) { 122 | Ok(formattedstr) => { 123 | match writer.write(format!("{}\n", formattedstr).as_bytes()).await { 124 | Ok(_written) => {} 125 | Err(e) => eprintln!("FileLogger: Ignoring error writing to file {}", e), 126 | } 127 | } 128 | Err(e) => eprintln!( 129 | "FileLogger: Ignoring error formatting event to {:?}: {}", 130 | lfc.format, e 131 | ), 132 | }, 133 | Err(broadcast::error::RecvError::Lagged(count)) => { 134 | eprintln!( 135 | "FileLogger is lagging behind generated events. {} events have been dropped.", 136 | count 137 | ) 138 | } 139 | Err(broadcast::error::RecvError::Closed) => { 140 | eprintln!("FileLogger event source closed. 
Exiting."); 141 | return Ok(()); 142 | } 143 | } 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/emitter/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | use crate::events; 4 | use crate::params; 5 | 6 | use core::future::Future; 7 | use core::pin::Pin; 8 | use futures::future::join_all; 9 | use futures::stream::Stream; 10 | use futures::StreamExt; 11 | use std::convert::From; 12 | use std::error; 13 | use std::fmt::{Display, Formatter, Result as FmtResult}; 14 | use tokio::sync::broadcast; 15 | 16 | mod console; 17 | mod filelogger; 18 | mod pagerduty; 19 | mod polycorder; 20 | mod syslogger; 21 | 22 | type EmitForeverFuture = Pin>>>; 23 | 24 | pub struct EmitterConfig { 25 | pub verbosity: u8, 26 | pub console: Option, 27 | pub polycorder: Option, 28 | pub syslog: Option, 29 | pub logfile: Option, 30 | pub pagerduty_routing_key: Option, 31 | } 32 | 33 | #[derive(Debug)] 34 | pub enum EmitterError { 35 | UnexpectedExit, 36 | StreamEnded, 37 | NoEmitters, 38 | SendError(broadcast::error::SendError), 39 | Polycorder(polycorder::PolycorderError), 40 | Syslogger(syslogger::SysLoggerError), 41 | FileLogger(filelogger::FileLoggerError), 42 | Pagerduty(pagerduty::PagerDutyError), 43 | } 44 | 45 | impl error::Error for EmitterError {} 46 | impl Display for EmitterError { 47 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 48 | match self { 49 | Self::UnexpectedExit => write!(f, "EmitterError: Unexpected exited from an infinite loop. Usually an error is associated with this but none was reported."), 50 | Self::StreamEnded => write!(f, "EmitterError: The event stream unexpectedly exited. Zerotect streams should be perpetual and not expected."), 51 | Self::NoEmitters => write!(f, "EmitterError: No emitters were configured. 
Zerotect is useless if it isn't emitting to at least one destination."), 52 | Self::SendError(e) => write!(f, "Error Sending an event to Emitters: {}", e), 53 | Self::Polycorder(e) => write!(f, "Error in Polycorder Emitter: {}", e), 54 | Self::Syslogger(e) => write!(f, "Error in Syslogger Emitter: {}", e), 55 | Self::FileLogger(e) => write!(f, "Error in FileLogger Emitter: {}", e), 56 | Self::Pagerduty(e) => write!(f, "Error in Pagerduty Emitter: {}", e), 57 | } 58 | } 59 | } 60 | 61 | impl From for EmitterError { 62 | fn from(err: polycorder::PolycorderError) -> Self { 63 | Self::Polycorder(err) 64 | } 65 | } 66 | 67 | impl From for EmitterError { 68 | fn from(err: syslogger::SysLoggerError) -> Self { 69 | Self::Syslogger(err) 70 | } 71 | } 72 | 73 | impl From for EmitterError { 74 | fn from(err: filelogger::FileLoggerError) -> Self { 75 | Self::FileLogger(err) 76 | } 77 | } 78 | 79 | impl From for EmitterError { 80 | fn from(err: pagerduty::PagerDutyError) -> Self { 81 | Self::Pagerduty(err) 82 | } 83 | } 84 | 85 | impl From> for EmitterError { 86 | fn from(err: broadcast::error::SendError) -> Self { 87 | Self::SendError(err) 88 | } 89 | } 90 | 91 | pub async fn emit_forever( 92 | ec: EmitterConfig, 93 | source: impl Stream + Unpin + 'static, 94 | hostname: Option, 95 | ) -> Result<(), EmitterError> { 96 | eprintln!("Emitter: Initializing..."); 97 | 98 | // start a channel to all emitters with plenty of buffer 99 | let (tx, _) = broadcast::channel(1000); 100 | 101 | let mut emit_forever_futures: Vec = vec![]; 102 | if let Some(cc) = ec.console { 103 | eprintln!("Emitter: Initialized Console emitter. Expect messages to be printed to Standard Output."); 104 | emit_forever_futures.push(Box::pin(console::emit_forever(cc, tx.subscribe()))); 105 | } 106 | if let Some(sc) = ec.syslog { 107 | eprintln!("Emitter: Initialized Syslog emitter. 
Expect messages to be sent to Syslog."); 108 | emit_forever_futures.push(Box::pin(syslogger::emit_forever( 109 | sc, 110 | hostname, 111 | tx.subscribe(), 112 | ))); 113 | } 114 | if let Some(lfc) = ec.logfile { 115 | eprintln!("Emitter: Initialized LogFile emitter. Expect messages to be sent to a file."); 116 | emit_forever_futures.push(Box::pin(filelogger::emit_forever(lfc, tx.subscribe()))); 117 | } 118 | if let Some(prk) = ec.pagerduty_routing_key { 119 | eprintln!("Emitter: Initialized PagerDuty emitter. Expect messages to be sent to a PagerDuty Service."); 120 | emit_forever_futures.push(Box::pin(pagerduty::emit_forever(prk, tx.subscribe()))); 121 | } 122 | if let Some(tc) = ec.polycorder { 123 | eprintln!("Emitter: Initialized Polycorder emitter. Expect messages to be phoned home to the Polyverse polycorder service."); 124 | emit_forever_futures.push(Box::pin(polycorder::emit_forever( 125 | ec.verbosity, 126 | tc, 127 | tx.subscribe(), 128 | ))); 129 | } 130 | 131 | if emit_forever_futures.is_empty() { 132 | return Err(EmitterError::NoEmitters); 133 | } 134 | 135 | // add the stream-to-broadcasting future as well 136 | emit_forever_futures.push(Box::pin(transmit_forever(source, tx))); 137 | 138 | // then just wait on all of them! 
139 | join_all(emit_forever_futures).await; 140 | 141 | Err(EmitterError::StreamEnded) 142 | } 143 | 144 | async fn transmit_forever( 145 | mut source: impl Stream + Unpin, 146 | tx: broadcast::Sender, 147 | ) -> Result<(), EmitterError> { 148 | while let Some(event) = source.next().await { 149 | tx.send(event)?; 150 | } 151 | 152 | Err(EmitterError::UnexpectedExit) 153 | } 154 | -------------------------------------------------------------------------------- /src/emitter/pagerduty.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | use crate::emitter; 4 | use crate::events; 5 | use pagerduty_rs::{eventsv2async::*, types::*}; 6 | use std::error; 7 | use std::fmt::{Display, Formatter, Result as FmtResult}; 8 | use time::OffsetDateTime; 9 | use tokio::sync::broadcast; 10 | 11 | #[derive(Debug)] 12 | pub enum PagerDutyError { 13 | EventsV2Error(EventsV2Error), 14 | } 15 | impl error::Error for PagerDutyError {} 16 | impl Display for PagerDutyError { 17 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 18 | match self { 19 | Self::EventsV2Error(e) => write!(f, "PagerDutyError::EventsV2Error: {}", e), 20 | } 21 | } 22 | } 23 | impl From for PagerDutyError { 24 | fn from(err: EventsV2Error) -> PagerDutyError { 25 | PagerDutyError::EventsV2Error(err) 26 | } 27 | } 28 | 29 | pub async fn emit_forever( 30 | routing_key: String, 31 | source: broadcast::Receiver, 32 | ) -> Result<(), emitter::EmitterError> { 33 | // It helps to keep a localized error implementation without exposing a lot of 34 | // dependency and interpretation in upper emitter 35 | Ok(emit_forever_pagerduty_error(routing_key, source).await?) 
36 | } 37 | 38 | pub async fn emit_forever_pagerduty_error( 39 | routing_key: String, 40 | mut source: broadcast::Receiver, 41 | ) -> Result<(), PagerDutyError> { 42 | let eventsv2 = EventsV2::new(routing_key, Some("zerotect".to_owned()))?; 43 | 44 | loop { 45 | match source.recv().await { 46 | Ok(event) => { 47 | if !event.as_ref().is_analyzed() { 48 | continue; 49 | }; 50 | 51 | let source = match event.as_ref().get_hostname() { 52 | Some(h) => h.to_owned(), 53 | None => "unknown".to_owned(), 54 | }; 55 | 56 | let result = eventsv2 57 | .event(Event::AlertTrigger(AlertTrigger { 58 | payload: AlertTriggerPayload { 59 | summary: "Zerotect detected anomaly".to_owned(), 60 | source, 61 | timestamp: Some(OffsetDateTime::now_utc()), 62 | severity: Severity::Warning, 63 | component: None, 64 | group: None, 65 | class: None, 66 | custom_details: Some(event.as_ref()), 67 | }, 68 | images: None, 69 | links: None, 70 | dedup_key: None, 71 | client: Some("Zerotect".to_owned()), 72 | client_url: Some("https://github.com/polyverse/zerotect".to_owned()), 73 | })) 74 | .await; 75 | 76 | if let Err(err) = result { 77 | eprintln!( 78 | "Error when writing event to pager duty. Not retrying. {}", 79 | err 80 | ); 81 | } 82 | } 83 | Err(broadcast::error::RecvError::Lagged(count)) => { 84 | eprintln!( 85 | "PagerDuty is lagging behind generated events. {} events have been dropped.", 86 | count 87 | ) 88 | } 89 | Err(broadcast::error::RecvError::Closed) => { 90 | eprintln!("PagerDuty event source closed. 
Exiting."); 91 | return Ok(()); 92 | } 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/emitter/polycorder.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | use http::StatusCode; 4 | use libflate::gzip::Encoder; 5 | use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_ENCODING, CONTENT_TYPE}; 6 | use reqwest::Client; 7 | use serde::Serialize; 8 | use std::convert::From; 9 | use std::error; 10 | use std::fmt::{Display, Formatter, Result as FmtResult}; 11 | use std::io::Write; 12 | use std::time::Duration; 13 | use tokio::sync::broadcast; 14 | use tokio::time::timeout; 15 | 16 | use crate::emitter; 17 | use crate::events; 18 | use crate::params; 19 | 20 | const POLYCORDER_PUBLISH_ENDPOINT: &str = "https://polycorder.polyverse.com/v1/events"; 21 | const GZIP_THRESHOLD_BYTES: usize = 512; 22 | const CONTENT_ENCODING_GZIP: &str = "gzip"; 23 | const CONTENT_ENCODING_IDENTITY: &str = "identity"; 24 | const CONTENT_TYPE_JSON: &str = "application/json"; 25 | const USER_AGENT_ZEROTECT: &str = "zerotect"; 26 | 27 | #[derive(Debug)] 28 | pub enum PolycorderError { 29 | IoError(std::io::Error), 30 | InvalidHeaderValue(reqwest::header::InvalidHeaderValue), 31 | ReqwestError(reqwest::Error), 32 | SerdeJson(serde_json::Error), 33 | } 34 | impl error::Error for PolycorderError {} 35 | impl Display for PolycorderError { 36 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 37 | match self { 38 | Self::IoError(e) => write!(f, "PolycorderError::IoError {}", e), 39 | Self::InvalidHeaderValue(e) => write!(f, "PolycorderError::InvalidHeaderValue {}", e), 40 | Self::ReqwestError(e) => write!(f, "PolycorderError::ReqwestError {}", e), 41 | Self::SerdeJson(e) => write!(f, "PolycorderError::SerdeJsonError {}", e), 42 | } 43 | } 44 | } 45 | impl From for PolycorderError { 46 | fn from(err: std::io::Error) -> Self { 47 | 
Self::IoError(err)
    }
}
impl From<reqwest::header::InvalidHeaderValue> for PolycorderError {
    fn from(err: reqwest::header::InvalidHeaderValue) -> Self {
        Self::InvalidHeaderValue(err)
    }
}
impl From<reqwest::Error> for PolycorderError {
    fn from(err: reqwest::Error) -> Self {
        Self::ReqwestError(err)
    }
}
impl From<serde_json::Error> for PolycorderError {
    fn from(err: serde_json::Error) -> Self {
        Self::SerdeJson(err)
    }
}

// The structure to send data to Polycorder in...
#[derive(Serialize)]
struct Report<'l> {
    node_id: &'l str,
    events: &'l [events::Event],
}

/// Thin wrapper converting the local PolycorderError into the shared EmitterError.
pub async fn emit_forever(
    verbosity: u8,
    config: params::PolycorderConfig,
    source: broadcast::Receiver<events::Event>,
) -> Result<(), emitter::EmitterError> {
    // It helps to keep a localized error implementation without exposing a lot of
    // dependency and interpretation in upper emitter
    Ok(emit_forever_polycorder_error(verbosity, config, source).await?)
}

/// Buffers incoming events and flushes them to Polycorder when the buffer
/// reaches `flush_event_count` OR `flush_timeout_seconds` elapses without a
/// new event — whichever comes first.
async fn emit_forever_polycorder_error(
    verbosity: u8,
    config: params::PolycorderConfig,
    mut source: broadcast::Receiver<events::Event>,
) -> Result<(), PolycorderError> {
    let bearer_token = HeaderValue::from_str(format!("Bearer {}", config.auth_key).as_str())?;
    let content_type_json = HeaderValue::from_str(CONTENT_TYPE_JSON)?;

    let mut headers = HeaderMap::new();
    headers.insert(AUTHORIZATION, bearer_token);
    headers.insert(CONTENT_TYPE, content_type_json);

    let client = Client::builder()
        .user_agent(USER_AGENT_ZEROTECT)
        .default_headers(headers)
        .build()?;

    let mut events: Vec<events::Event> = vec![];

    let timeout_duration = Duration::from_secs(config.flush_timeout_seconds);

    loop {
        let flush = match timeout(timeout_duration, source.recv()).await {
            Ok(recv_result) => match recv_result {
                Ok(event) => {
                    events.push(event);
                    events.len() >= config.flush_event_count
                }
                Err(broadcast::error::RecvError::Lagged(count)) => {
                    eprintln!(
                        "Polycorder is lagging behind generated events. {} events have been dropped.",
                        count
                    );
                    false
                }
                Err(broadcast::error::RecvError::Closed) => {
                    eprintln!("Polycorder event source closed. Exiting.");
                    return Ok(());
                }
            },
            // Timer elapsed with no new event: flush whatever is buffered.
            // NOTE(review): this also fires with an EMPTY buffer and posts an
            // empty report every timeout period — confirm whether that is an
            // intentional heartbeat before guarding with !events.is_empty().
            Err(_) => true,
        };

        if flush {
            publish_to_polycorder(verbosity, &config, &client, &events).await?;
            events.clear();
        }
    }
}

/// Serializes the buffered events into a Report, optionally gzips the payload,
/// and POSTs it to the Polycorder endpoint. HTTP-level failures are logged and
/// swallowed deliberately (best-effort delivery); only local serialization
/// errors propagate.
async fn publish_to_polycorder(
    verbosity: u8,
    config: &params::PolycorderConfig,
    client: &Client,
    events: &[events::Event],
) -> Result<(), PolycorderError> {
    let report = Report {
        node_id: config.node_id.as_str(),
        events,
    };

    let json_serialized_report = serde_json::to_vec(&report)?;

    let (body, content_encoding) = encode_payload(json_serialized_report, verbosity);

    let response_result = client
        .post(POLYCORDER_PUBLISH_ENDPOINT)
        .header(CONTENT_ENCODING, content_encoding)
        .body(body)
        .send()
        .await;

    match response_result {
        Ok(response) => {
            let status = response.status();
            // explain common statuses a bit more...
            if verbosity > 0 && status.is_success() && status == StatusCode::OK {
                eprintln!(
                    "Polycorder: Successfully published {} events. Clearing buffer. Response from Polycorder: {:?}",
                    events.len(),
                    response
                );
            } else if status.is_server_error() {
                eprintln!(
                    "Polycorder: Unable to publish {} events due to a server-side error. Response from Polycorder: {:?}",
                    events.len(),
                    response
                );
            } else if status == StatusCode::UNAUTHORIZED {
                // SECURITY FIX: the original message interpolated the configured
                // auth_key (a secret) into stderr. Credentials must not leak
                // into logs, so the key's value is no longer printed.
                eprintln!(
                    "Polycorder: Unable to publish {} events because the configured polycorder authkey was not accepted (HTTP 401 Unauthorized). Response from Polycorder: {:?}",
                    events.len(),
                    response
                );
            } else {
                eprintln!(
                    "Polycorder: Unexpected error when publishing {} events to Polycorder due to an unexpected error. Response from Polycorder: {:?}",
                    events.len(),
                    response
                );
            }
        }
        Err(e) => eprintln!(
            "Polycorder: Client error making POST request to Polycorder service {}: {}",
            POLYCORDER_PUBLISH_ENDPOINT, e
        ),
    }

    Ok(())
}

/// Gzips the payload when it exceeds GZIP_THRESHOLD_BYTES; otherwise (or on any
/// encoder failure) returns it unchanged. Returns the payload along with the
/// matching Content-Encoding header value.
fn encode_payload(raw_payload: Vec<u8>, verbosity: u8) -> (Vec<u8>, &'static str) {
    match raw_payload.len() > GZIP_THRESHOLD_BYTES {
        true => {
            if verbosity > 0 {
                eprintln!("Polycorder: Compressing payload because it is {} bytes, thus greater than than threshold of {} bytes", raw_payload.len(), GZIP_THRESHOLD_BYTES);
            }

            // Encoding
            let mut encoder = match Encoder::new(Vec::new()) {
                Ok(encoder) => encoder,
                Err(e) => {
                    eprintln!("Unable to create a GZIP Encoder. Defaulting to uncompressed payload. Error: {:?}", e);
                    return (raw_payload, CONTENT_ENCODING_IDENTITY);
                }
            };

            if let Err(e) = encoder.write_all(&raw_payload) {
                eprintln!("Unable to write the serialized raw payload to GZIP encoder. Defaulting to uncompressed payload. Error: {:?}", e);
                return (raw_payload, CONTENT_ENCODING_IDENTITY);
            };

            let compressed_payload = match encoder.finish().into_result() {
                Ok(compressed) => compressed,
                Err(e) => {
                    eprintln!("Unable to GZIP the contents. Defaulting to uncompressed payload. Error: {:?}", e);
                    return (raw_payload, CONTENT_ENCODING_IDENTITY);
                }
            };

            if verbosity > 1 {
                eprintln!(
                    "GZIPed down to {} bytes from original {} bytes.",
                    compressed_payload.len(),
                    raw_payload.len()
                );
            }

            (compressed_payload, CONTENT_ENCODING_GZIP)
        }
        false => {
            if verbosity > 0 {
                eprintln!("Polycorder: Not compressing upload payload because it is {} bytes, thus smaller than (or equal to) threshold of {} bytes", raw_payload.len(), GZIP_THRESHOLD_BYTES);
            }
            (raw_payload, CONTENT_ENCODING_IDENTITY)
        }
    }
}

// -------------------------------------------------------------------------
// /src/emitter/syslogger.rs
// -------------------------------------------------------------------------

// Copyright (c) 2020 Polyverse Corporation

use crate::emitter;
use crate::events;
use crate::formatter::new as new_formatter;
use crate::params;
use libc::getpid;
use params::{SyslogConfig, SyslogDestination};
use std::error;
use std::fmt::{Display, Formatter as FmtFormatter, Result as FmtResult};
use syslog::{Error as SyslogError, Facility, Formatter3164};
use tokio::sync::broadcast;

/// Error type local to the Syslog emitter.
#[derive(Debug)]
pub enum SysLoggerError {
    MissingParameter(String),
    Syslog(SyslogError),
}

impl error::Error for SysLoggerError {}
impl Display for SysLoggerError {
    fn fmt(&self, f: &mut FmtFormatter) -> FmtResult {
        match self {
            Self::MissingParameter(s) => write!(f, "SysLoggerError::MissingParameter: {}", s),
            Self::Syslog(s) => write!(f, "SysLoggerError::Syslog internal error: {}", s),
        }
    }
}
impl From<SyslogError> for SysLoggerError {
    fn from(err: SyslogError) -> Self {
        Self::Syslog(err)
    }
}

/// Thin wrapper converting the local SysLoggerError into the shared EmitterError.
pub async fn emit_forever(
    sc: SyslogConfig,
    hostname: Option<String>,
    source: broadcast::Receiver<events::Event>,
) -> Result<(),
emitter::EmitterError> { 40 | // Value in capturing local errors here and wrapping them to EmitterError 41 | Ok(emit_forever_syslogger_error(sc, hostname, source).await?) 42 | } 43 | 44 | pub async fn emit_forever_syslogger_error( 45 | sc: SyslogConfig, 46 | hostname: Option, 47 | mut source: broadcast::Receiver, 48 | ) -> Result<(), SysLoggerError> { 49 | let pid = getpid_safe(); 50 | let syslog_formatter = Formatter3164 { 51 | facility: Facility::LOG_USER, 52 | hostname, 53 | process: "zerotect".to_owned(), 54 | pid: pid as u32, 55 | }; 56 | 57 | // fire up the syslogger logger 58 | let mut inner_logger = match sc.destination { 59 | SyslogDestination::Default => match syslog::unix(syslog_formatter.clone()) { 60 | Ok(unix_logger) => unix_logger, 61 | // logic copied from 'init' 62 | // https://docs.rs/syslog/5.0.0/src/syslog/lib.rs.html#429 63 | Err(unix_err) => { 64 | eprintln!("Unable to connect to syslog on the default unix sockets: {}. Moving on to TCP...", unix_err); 65 | match syslog::tcp(syslog_formatter.clone(), "127.0.0.1:601") { 66 | Err(tcp_err) => { 67 | eprintln!("Unable to connect to syslog on the default tcp endpoint: {}. Moving on to UDP (this rarely fails)...", tcp_err); 68 | syslog::udp(syslog_formatter, "127.0.0.1:0", "127.0.0.1:514")? 
69 | }, 70 | Ok(tcp_logger) => tcp_logger, 71 | } 72 | }, 73 | }, 74 | SyslogDestination::Unix => match sc.path { 75 | Some(path) => syslog::unix_custom(syslog_formatter, path)?, 76 | None => return Err(SysLoggerError::MissingParameter("Parameter 'path' was not provided, but required to connect syslog to unix socket.".to_owned())), 77 | }, 78 | SyslogDestination::Tcp => match sc.server { 79 | Some(server) => syslog::tcp(syslog_formatter, server)?, 80 | None => return Err(SysLoggerError::MissingParameter("Parameter 'server' was not provided, but required to connect syslog to unix socket.".to_owned())), 81 | }, 82 | SyslogDestination::Udp => match sc.server { 83 | Some(server) => match sc.local { 84 | Some(local) => syslog::udp(syslog_formatter, local, server)?, 85 | None => return Err(SysLoggerError::MissingParameter("Parameter 'local' was not provided, but required to connect syslog to unix socket.".to_owned())), 86 | }, 87 | None => return Err(SysLoggerError::MissingParameter("Parameter 'server' was not provided, but required to connect syslog to unix socket.".to_owned())), 88 | }, 89 | }; 90 | 91 | let event_formatter = new_formatter(&sc.format); 92 | 93 | loop { 94 | match source.recv().await { 95 | Ok(event) => match event_formatter.format(&event) { 96 | Ok(formattedstr) => { 97 | if let Err(e) = inner_logger.info(&formattedstr) { 98 | eprintln!( 99 | "Error writing event to syslog due to error {:?}. The event string: {}", 100 | e, &formattedstr 101 | ) 102 | } 103 | } 104 | Err(e) => eprintln!("Error formatting event to {:?}: {}", sc.format, e), 105 | }, 106 | Err(broadcast::error::RecvError::Lagged(count)) => { 107 | eprintln!( 108 | "Syslogger is lagging behind generated events. {} events have been dropped.", 109 | count 110 | ) 111 | } 112 | Err(broadcast::error::RecvError::Closed) => { 113 | eprintln!("Syslogger event source closed. 
Exiting."); 114 | return Ok(()); 115 | } 116 | } 117 | } 118 | } 119 | 120 | fn getpid_safe() -> i32 { 121 | unsafe { getpid() } 122 | } 123 | -------------------------------------------------------------------------------- /src/events.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | use num_derive::FromPrimitive; 4 | use rmesg::entry; 5 | use serde::{ser::Error as SerializeError, Serialize, Serializer}; 6 | use std::collections::{BTreeMap, HashMap}; 7 | use std::fmt::{Display, Formatter, Result as FmtResult}; 8 | use std::rc::Rc; 9 | use strum_macros::Display; 10 | use strum_macros::EnumString; 11 | use time::{format_description::well_known::Rfc3339, OffsetDateTime}; 12 | 13 | //use rust_cef::{ToCef, CefHeaderVersion, CefHeaderDeviceVendor, CefHeaderDeviceProduct, CefHeaderDeviceVersion, CefHeaderDeviceEventClassID, CefHeaderName, CefHeaderSeverity, CefExtensions}; 14 | use rust_cef_derive::{ 15 | CefExtensions, CefHeaderDeviceEventClassID, CefHeaderDeviceProduct, CefHeaderDeviceVendor, 16 | CefHeaderDeviceVersion, CefHeaderName, CefHeaderSeverity, CefHeaderVersion, ToCef, 17 | }; 18 | //use rust_cef_derive::{cef_values, cef_inherit, cef_ext_values} 19 | 20 | #[cfg(test)] 21 | use serde::{de, Deserialize}; 22 | 23 | pub type Event = Rc; 24 | 25 | /// Event is the complete structure that Polycorder (Polyverse-hosted 26 | /// zero-day detection service) understands. This structure is also 27 | /// the reference schema/format for all detect-efforts. 28 | /// 29 | /// As such, it is encouraged to have many detectors that emit 30 | /// data in this structure. 31 | /// 32 | /// Different implementations of the structure may very. Various fields 33 | /// may come or go. 34 | /// 35 | /// All parsers are encouraged to first test the "Version" field and then 36 | /// parse the correct structure. 
The field `version` is guaranteed to exist 37 | /// on ALL versions and instances of Event. Any structure/data that does not 38 | /// contain the version field, is considered invalid. 39 | /// 40 | #[derive( 41 | Debug, 42 | PartialEq, 43 | Clone, 44 | Serialize, 45 | ToCef, 46 | CefHeaderVersion, 47 | CefHeaderDeviceVendor, 48 | CefHeaderDeviceProduct, 49 | CefHeaderDeviceVersion, 50 | CefHeaderDeviceEventClassID, 51 | CefHeaderName, 52 | CefHeaderSeverity, 53 | CefExtensions, 54 | )] 55 | #[cfg_attr(test, derive(Deserialize))] 56 | #[cef_values( 57 | CefHeaderVersion = "0", 58 | CefHeaderDeviceVendor = "polyverse", 59 | CefHeaderDeviceProduct = "zerotect" 60 | )] 61 | #[serde(tag = "version")] 62 | pub enum Version { 63 | /// Version is guaranteed to exist. All other fields may change or not exist, 64 | /// and it is recommended to use a different version when making breaking changes 65 | /// to all other fields. It allows parsers to test on version and determine if they 66 | /// know what to do with the rest. 67 | /// For this particular variant, set DeviceVersion to a fixed value "V1" 68 | #[cef_values(CefHeaderDeviceVersion = "1.0")] 69 | V1 { 70 | /// This is universal and important for all events. They occur at a time. 71 | #[cef_ext_gobble] 72 | #[serde(serialize_with = "datetime_to_iso8601")] 73 | #[cfg_attr(test, serde(deserialize_with = "iso8601_to_datetime"))] 74 | timestamp: OffsetDateTime, 75 | 76 | #[cef_ext_field(dhost)] 77 | #[serde(skip_serializing_if = "Option::is_none")] 78 | hostname: Option, 79 | 80 | /// Platform records fields specific to a specific mechanism/platform. 
81 | // For this variant, inherit the other three headers from the event field 82 | #[cef_inherit(CefHeaderDeviceEventClassID, CefHeaderName, CefHeaderSeverity)] 83 | #[cef_ext_gobble] 84 | event: EventType, 85 | }, 86 | } 87 | 88 | impl Version { 89 | pub fn get_hostname(&self) -> &Option { 90 | match self { 91 | Self::V1 { 92 | timestamp: _, 93 | hostname, 94 | event: _, 95 | } => hostname, 96 | } 97 | } 98 | 99 | /// if true, the event is not raw, but rather an analyzed detection 100 | pub fn is_analyzed(&self) -> bool { 101 | match self { 102 | Self::V1 { 103 | timestamp: _, 104 | hostname: _, 105 | event, 106 | } => matches!(event, EventType::RegisterProbe(_)), 107 | } 108 | } 109 | } 110 | 111 | impl Display for Version { 112 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 113 | match self { 114 | Version::V1 { 115 | timestamp, 116 | hostname, 117 | event, 118 | } => write!( 119 | f, 120 | "Event::{}", 121 | hostname.as_deref().unwrap_or(""), 122 | timestamp, 123 | event 124 | ), 125 | } 126 | } 127 | } 128 | 129 | /// The Platform this event originated on. 
130 | #[derive( 131 | Debug, 132 | PartialEq, 133 | Clone, 134 | Serialize, 135 | CefHeaderDeviceEventClassID, 136 | CefHeaderName, 137 | CefHeaderSeverity, 138 | CefExtensions, 139 | )] 140 | #[cfg_attr(test, derive(Deserialize))] 141 | #[serde(tag = "type")] 142 | pub enum EventType { 143 | /// An analytics-detected internal event based on other events 144 | #[cef_values( 145 | CefHeaderDeviceEventClassID = "RegisterProbe", 146 | CefHeaderName = "Probe using Register Increment", 147 | CefHeaderSeverity = "10" 148 | )] 149 | RegisterProbe(#[cef_ext_gobble] RegisterProbe), 150 | 151 | /// The Linux platform and event details in the Linux context 152 | /// A Kernel Trap event - the kernel stops process execution for attempting something stupid 153 | #[cef_values( 154 | CefHeaderDeviceEventClassID = "LinuxKernelTrap", 155 | CefHeaderName = "Linux Kernel Trap", 156 | CefHeaderSeverity = "10" 157 | )] 158 | LinuxKernelTrap(#[cef_ext_gobble] LinuxKernelTrap), 159 | 160 | /// A Fatal Signal from the process because the process did something stupid 161 | #[cef_values( 162 | CefHeaderDeviceEventClassID = "LinuxFatalSignal", 163 | CefHeaderName = "Linux Fatal Signal", 164 | CefHeaderSeverity = "10" 165 | )] 166 | LinuxFatalSignal(#[cef_ext_gobble] LinuxFatalSignal), 167 | 168 | /// Information about a suppressed callback i.e. when a particular 169 | /// type of error happens so much it is suppressed 'n' times. 170 | /// 171 | /// This captures what the log was, and how many times it was suppressed. 172 | /// 173 | /// This is a crucial data point because under Blind ROP attacks an error 174 | /// might happen thousands of times but may only be logged once, with all the 175 | /// remaining attempts being suppressed. 
176 | #[cef_values( 177 | CefHeaderDeviceEventClassID = "LinuxSuppressedCallback", 178 | CefHeaderName = "Linux kernel suppressed repetitive log entries", 179 | CefHeaderSeverity = "3" 180 | )] 181 | LinuxSuppressedCallback(#[cef_ext_gobble] LinuxSuppressedCallback), 182 | 183 | /// This is a zerotect-internal event. zerotect can be commanded to set and ensure certain 184 | /// configuration settings to capture events, such as enabling kernel fatal-signals, or 185 | /// core dumps. 186 | /// 187 | /// This event is triggered when, after zerotect has configured a machine as commanded, the 188 | /// configuration later mismatched. It means someone attempted to undo those changes. 189 | /// 190 | /// This event usually tells an observer they may not be seeing other events because they may be 191 | /// disabled. 192 | #[cef_values( 193 | CefHeaderDeviceEventClassID = "ConfigMismatch", 194 | CefHeaderName = "Configuration mismatched what zerotect expected", 195 | CefHeaderSeverity = "4" 196 | )] 197 | ConfigMismatch(#[cef_ext_gobble] ConfigMismatch), 198 | } 199 | 200 | impl Display for EventType { 201 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 202 | match self { 203 | EventType::RegisterProbe(RegisterProbe { 204 | register, 205 | message, 206 | procname, 207 | justification, 208 | }) => { 209 | write!(f, 210 | "In process {}, Register {} found close to each other {} times indicating: {}. 
The set of events that justify this analyzed event are: {:?}", procname, register, justification.len(), message, justification) 211 | } 212 | EventType::LinuxKernelTrap(LinuxKernelTrap { 213 | level, 214 | facility, 215 | trap, 216 | procname, 217 | pid, 218 | ip, 219 | sp, 220 | errcode, 221 | file, 222 | vmasize, 223 | vmastart, 224 | }) => { 225 | write!( 226 | f, 227 | "{}:: {} by process {}(pid:{}, instruction pointer: {}, stack pointer: {})", 228 | level, 229 | facility, 230 | trap, 231 | errcode, 232 | procname, 233 | pid, 234 | ip, 235 | sp 236 | )?; 237 | 238 | if let (Some(file), Some(vmastart), Some(vmasize)) = 239 | (file.as_ref(), vmastart, vmasize) 240 | { 241 | write!( 242 | f, 243 | " in file {} (VMM region {} of size {})", 244 | file, vmastart, vmasize 245 | )?; 246 | } 247 | 248 | Ok(()) 249 | } 250 | EventType::LinuxFatalSignal(LinuxFatalSignal { 251 | level, 252 | facility, 253 | signal, 254 | stack_dump, 255 | }) => { 256 | write!( 257 | f, 258 | "Fatal Signal: {}({}, StackDump: {:?})", 259 | level, 260 | facility, 261 | signal, 262 | // https://stackoverflow.com/questions/31358826/how-do-i-convert-an-enum-reference-to-a-number 263 | *signal as u8, 264 | stack_dump, 265 | ) 266 | } 267 | EventType::LinuxSuppressedCallback(LinuxSuppressedCallback { 268 | level, 269 | facility, 270 | function_name, 271 | count, 272 | }) => write!( 273 | f, 274 | "Suppressed {} callbacks to {}", 275 | level, facility, count, &function_name, 276 | ), 277 | EventType::ConfigMismatch(ConfigMismatch { 278 | key, 279 | expected_value, 280 | observed_value, 281 | }) => write!( 282 | f, 283 | "Configuration key {} should have been {}, but found to be {}", 284 | &key, &expected_value, &observed_value 285 | ), 286 | } 287 | } 288 | } 289 | 290 | /// This event represents a probe using a Register 291 | /// i.e. someone is probing/fuzzing a program with different values of 292 | /// a particular register. 
293 | /// 294 | /// When probing a stack canary, RDI/RSI increment by one value, for instance. 295 | /// 296 | #[derive(Debug, PartialEq, Clone, Serialize, CefExtensions)] 297 | #[cfg_attr(test, derive(Deserialize))] 298 | #[cef_ext_values(cs1Label = "register")] 299 | pub struct RegisterProbe { 300 | /// Which register was being probed? 301 | #[cef_ext_field(cs1)] 302 | pub register: String, 303 | 304 | /// What does this probe mean? What interpretation could this 305 | /// particular register probe have? 306 | #[cef_ext_field(msg)] 307 | pub message: String, 308 | 309 | // The process in which this register probe occurred 310 | #[cef_ext_field(dproc)] 311 | pub procname: String, 312 | 313 | /// The raw events which justify this analytics event. 314 | #[cef_ext_gobble] 315 | pub justification: RegisterProbeJustification, 316 | } 317 | 318 | #[derive(Debug, PartialEq, Clone, Serialize)] 319 | #[cfg_attr(test, derive(Deserialize))] 320 | pub enum RegisterProbeJustification { 321 | FullEvents(Vec), 322 | RegisterValues(Vec), 323 | EventCount(usize), 324 | } 325 | 326 | impl RegisterProbeJustification { 327 | pub fn len(&self) -> usize { 328 | match self { 329 | RegisterProbeJustification::FullEvents(events) => events.len(), 330 | RegisterProbeJustification::RegisterValues(values) => values.len(), 331 | RegisterProbeJustification::EventCount(count) => *count, 332 | } 333 | } 334 | } 335 | 336 | impl rust_cef::CefExtensions for RegisterProbeJustification { 337 | fn cef_extensions( 338 | &self, 339 | collector: &mut HashMap, 340 | ) -> rust_cef::CefExtensionsResult { 341 | collector.insert("cn1Label".to_owned(), "justifying_event_count".to_owned()); 342 | collector.insert("cn1".to_owned(), format!("{}", self.len())); 343 | Ok(()) 344 | } 345 | } 346 | 347 | #[derive(Debug, PartialEq, Clone, Serialize, CefExtensions)] 348 | #[cef_ext_values( 349 | cn2Label = "vmastart", 350 | cn3Label = "vmasize", 351 | flexString2Label = "signal" 352 | )] 353 | #[cfg_attr(test, 
derive(Deserialize))] 354 | pub struct LinuxKernelTrap { 355 | /// The type of kernel trap triggered 356 | /// A Log-level for this event - was it critical? 357 | pub level: entry::LogLevel, 358 | 359 | /// A Log-facility - most OSes would have one, but this is Linux-specific for now 360 | pub facility: entry::LogFacility, 361 | 362 | #[cef_ext_field(flexString2)] 363 | pub trap: KernelTrapType, 364 | 365 | #[cef_ext_field(dproc)] 366 | /// Name of the process in which the trap occurred 367 | pub procname: String, 368 | 369 | #[cef_ext_field(dpid)] 370 | /// Process ID 371 | pub pid: usize, 372 | 373 | /// Instruction Pointer (what memory address was executing) 374 | #[cef_ext_field(PolyverseZerotectInstructionPointerValue)] 375 | pub ip: usize, 376 | 377 | /// Stack Pointer 378 | #[cef_ext_field(PolyverseZerotectStackPointerValue)] 379 | pub sp: usize, 380 | 381 | /// The error code for the trap 382 | #[cef_ext_gobble] 383 | pub errcode: SegfaultErrorCode, 384 | 385 | /// (Optional) File in which the trap occurred (could be the main executable or library). 386 | #[cef_ext_field(fname)] 387 | #[serde(skip_serializing_if = "Option::is_none")] 388 | pub file: Option, 389 | 390 | /// (Optional) The Virtual Memory Address where this file (main executable or library) was mapped (with ASLR could be arbitrary). 391 | #[cef_ext_field(cn2)] 392 | #[serde(skip_serializing_if = "Option::is_none")] 393 | pub vmastart: Option, 394 | 395 | /// (Optional) The Virtual Memory Size of this file's mapping. 396 | #[cef_ext_field(cn3)] 397 | #[serde(skip_serializing_if = "Option::is_none")] 398 | pub vmasize: Option, 399 | } 400 | 401 | #[derive(Debug, PartialEq, Clone, Serialize, CefExtensions)] 402 | #[cef_ext_values(flexString2Label = "signal")] 403 | #[cfg_attr(test, derive(Deserialize))] 404 | pub struct LinuxFatalSignal { 405 | /// A Log-level for this event - was it critical? 
406 | pub level: entry::LogLevel, 407 | 408 | /// A Log-facility - most OSes would have one, but this is Linux-specific for now 409 | pub facility: entry::LogFacility, 410 | 411 | /// The type of Fatal triggered 412 | #[cef_ext_field(flexString2)] 413 | pub signal: FatalSignalType, 414 | 415 | /// An Optional Stack Dump if one was found and parsable. 416 | /// Do not place these in CEF format since ArcSight/Microfocus needs explicit field mappings. 417 | /// No telling what a real dump of registers/values might be contained here. Best to be safe. 418 | /// If you care about these values, use JSON/Text logging. 419 | pub stack_dump: BTreeMap, 420 | } 421 | 422 | impl Display for LinuxFatalSignal { 423 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 424 | write!( 425 | f, 426 | "LinuxFataSignal {} with level {} from facility {} and dump: {:?}", 427 | self.signal, self.level, self.facility, self.stack_dump 428 | ) 429 | } 430 | } 431 | 432 | #[derive(Debug, PartialEq, Clone, Serialize, CefExtensions)] 433 | #[cef_ext_values(flexString1Label = "function_name")] 434 | #[cfg_attr(test, derive(Deserialize))] 435 | pub struct LinuxSuppressedCallback { 436 | /// A Log-level for this event - was it critical? 437 | pub level: entry::LogLevel, 438 | 439 | /// A Log-facility - most OSes would have one, but this is Linux-specific for now 440 | pub facility: entry::LogFacility, 441 | 442 | /// Name of the function being suppressed/folded. 443 | #[cef_ext_field(flexString1)] 444 | pub function_name: String, 445 | 446 | /// Number of times it was suppressed. 447 | #[cef_ext_field(cnt)] 448 | pub count: usize, 449 | } 450 | 451 | #[derive(Debug, PartialEq, Clone, Serialize, CefExtensions)] 452 | #[cfg_attr(test, derive(Deserialize))] 453 | pub struct ConfigMismatch { 454 | /// The key in question whose values mismatched. 455 | #[cef_ext_field(PolyverseZerotectKey)] 456 | pub key: String, 457 | 458 | /// The value zerotect configured and thus expected. 
/// The types of kernel traps understood
/// NOTE: variant names and shape are part of the JSON wire format
/// (serde internally-tagged with "type") - do not rename or reorder fields.
#[derive(Debug, PartialEq, Clone, Serialize)]
#[cfg_attr(test, derive(Deserialize))]
#[serde(tag = "type")]
pub enum KernelTrapType {
    /// This is type zerotect doesn't know how to parse. So it captures and stores the string description.
    Generic {
        description: String,
    },

    /// Segfault occurs when an invalid memory access is performed (writing to read-only memory,
    /// executing non-executable memory, etc.)
    Segfault {
        location: usize,
    },

    /// Invalid Opcode occurs when the processor doesn't understand an opcode. This usually occurs
    /// when execution jumps to an otherwise data segment, or in the wrong byte within an instruction.
    InvalidOpcode,

    /// General Protection Fault
    GeneralProtectionFault,
}

// Human-readable rendering of the trap type; this string also appears in the
// CEF output as the flexString2 extension value (see LinuxKernelTrap).
impl Display for KernelTrapType {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        match self {
            KernelTrapType::Segfault { location } => write!(f, "Segfault at location {}", location),
            KernelTrapType::InvalidOpcode => write!(f, "Invalid Opcode"),
            KernelTrapType::GeneralProtectionFault => write!(f, "General Protection Fault"),
            KernelTrapType::Generic { description } => {
                write!(f, "Please parse this kernel trap: {}", description)
            }
        }
    }
}
/// The type of Access that triggered this Segmentation Fault
/// NOTE: variant names double as serialized values (Serialize) and as parse
/// targets (EnumString) - renaming them changes the wire format.
#[derive(EnumString, Debug, Display, PartialEq, Clone, Serialize)]
#[cfg_attr(test, derive(Deserialize))]
pub enum SegfaultAccessType {
    /// Attempting to Read
    Read,

    /// Attempting to Write
    Write,
}

/// The context under which the Segmentation Fault was triggered
/// NOTE: variant names double as serialized values (Serialize) and as parse
/// targets (EnumString) - renaming them changes the wire format.
#[derive(EnumString, Debug, Display, PartialEq, Clone, Serialize)]
#[cfg_attr(test, derive(Deserialize))]
pub enum SegfaultAccessMode {
    /// Process was in kernel mode (during a syscall, context switch, etc.)
    Kernel,
    /// Process was in user mode (userspace), i.e. the program was at fault.
    User,
}
will panic if this happens) 563 | #[cef_ext_field(cs4)] 564 | pub use_of_reserved_bit: bool, 565 | 566 | /// fault was an instruction fetch, not data read or write 567 | #[cef_ext_field(cs5)] 568 | pub instruction_fetch: bool, 569 | 570 | /// Memory Protection Keys related. Not sure what exactly triggers this. 571 | /// See more: https://lore.kernel.org/patchwork/patch/633070/ 572 | #[cef_ext_field(cs6)] 573 | pub protection_keys_block_access: bool, 574 | } 575 | 576 | impl SegfaultErrorCode { 577 | const REASON_BIT: usize = 1 << 0; 578 | const ACCESS_TYPE_BIT: usize = 1 << 1; 579 | const ACCESS_MODE_BIT: usize = 1 << 2; 580 | const USE_OF_RESERVED_BIT: usize = 1 << 3; 581 | const INSTRUCTION_FETCH_BIT: usize = 1 << 4; 582 | const PROTECTION_KEYS_BLOCK_ACCESS_BIT: usize = 1 << 5; 583 | 584 | // errcode is now long 585 | pub fn from_error_code(code: usize) -> SegfaultErrorCode { 586 | SegfaultErrorCode { 587 | reason: match (code & SegfaultErrorCode::REASON_BIT) > 0 { 588 | false => SegfaultReason::NoPageFound, 589 | true => SegfaultReason::ProtectionFault, 590 | }, 591 | access_type: match (code & SegfaultErrorCode::ACCESS_TYPE_BIT) > 0 { 592 | false => SegfaultAccessType::Read, 593 | true => SegfaultAccessType::Write, 594 | }, 595 | access_mode: match (code & SegfaultErrorCode::ACCESS_MODE_BIT) > 0 { 596 | false => SegfaultAccessMode::Kernel, 597 | true => SegfaultAccessMode::User, 598 | }, 599 | use_of_reserved_bit: (code & SegfaultErrorCode::USE_OF_RESERVED_BIT) > 0, 600 | instruction_fetch: (code & SegfaultErrorCode::INSTRUCTION_FETCH_BIT) > 0, 601 | protection_keys_block_access: (code 602 | & SegfaultErrorCode::PROTECTION_KEYS_BLOCK_ACCESS_BIT) 603 | > 0, 604 | } 605 | } 606 | } 607 | 608 | impl Display for SegfaultErrorCode { 609 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 610 | if self.use_of_reserved_bit { 611 | write!(f, "use of reserved bits in the page table entry detected") 612 | } else if self.protection_keys_block_access { 613 | write!(f, 
"protection keys block access (needs more documentation)") 614 | } else { 615 | let data_or_instruction = match self.instruction_fetch { 616 | false => "data", 617 | true => "instruction fetch", 618 | }; 619 | 620 | write!( 621 | f, 622 | "{} triggered by a {}-mode {} {}", 623 | self.reason, self.access_mode, data_or_instruction, self.access_type 624 | ) 625 | } 626 | } 627 | } 628 | 629 | /// The type of Fatal Signal detected 630 | /// Comprehensive list of POSIX signals in the linux kernel 631 | /// can be found int he kernel source tree: 632 | /// https://github.com/torvalds/linux/blob/master/include/linux/signal.h#L339 633 | /// 634 | /// A bit more detail may be found in the man-pages: 635 | /// http://man7.org/linux/man-pages/man7/signal.7.html 636 | #[derive(Debug, PartialEq, EnumString, Display, Copy, Clone, FromPrimitive, Serialize)] 637 | #[cfg_attr(test, derive(Deserialize))] 638 | pub enum FatalSignalType { 639 | /// Hangup detected on controlling terminal or death of controlling process 640 | Hup = 1, 641 | 642 | /// Interrupt from keyboard 643 | Int, 644 | 645 | /// Quit from keyboard 646 | Quit, 647 | 648 | /// Illegal Instruction 649 | Ill, 650 | 651 | /// Trace/breakpoint trap (typically used by debuggers) 652 | Trap, 653 | 654 | /// IOT trap or Abort signal from abort: http://man7.org/linux/man-pages/man3/abort.3.html. 
(synonym: SIGABRT) 655 | Iot, 656 | 657 | /// Bus error (bad memory access) 658 | Bus, 659 | 660 | /// Floating-point exception 661 | Fpe, 662 | 663 | /// Kill signal 664 | Kill, 665 | 666 | /// User-defined signal 1 667 | Usr1, 668 | 669 | /// Invalid memory reference 670 | Segv, 671 | 672 | /// User-defined signal 2 673 | Usr2, 674 | 675 | /// Broken pipe: write to pipe with no readers; see: http://man7.org/linux/man-pages/man7/pipe.7.html 676 | Pipe, 677 | 678 | /// Timer signal from alarm: http://man7.org/linux/man-pages/man2/alarm.2.html 679 | Alrm, 680 | 681 | /// Termination signal 682 | Term, 683 | 684 | /// Stack fault on coprocessor (unused) 685 | StkFlt, 686 | 687 | /// Child stopped or terminated (synonym: SIGCLD) 688 | Chld, 689 | 690 | /// Continue if stopped (typically used by debuggers) 691 | Cont, 692 | 693 | /// Stop process (typically used by debuggers) 694 | Stop, 695 | 696 | /// Stop typed at terminal 697 | Tstp, 698 | 699 | /// Terminal input for background process 700 | Ttin, 701 | 702 | /// Terminal output for background process 703 | Ttou, 704 | 705 | /// Urgent condition on socket (4.2BSD) 706 | Urg, 707 | 708 | /// CPU time limit exceeded (4.2BSD); See: http://man7.org/linux/man-pages/man2/setrlimit.2.html 709 | Xcpu, 710 | 711 | /// File size limit exceeded (4.2BSD); See: http://man7.org/linux/man-pages/man2/setrlimit.2.html 712 | Xfsz, 713 | 714 | /// Virtual alarm clock (4.2BSD) 715 | VtAlrm, 716 | 717 | /// Profiling timer expired 718 | Prof, 719 | 720 | /// Window resize signal (4.3BSD, Sun) 721 | Winch, 722 | 723 | /// I/O now possible (4.2BSD) or Pollable event (Sys V). 
(synonym: SIGPOLL) 724 | Io, 725 | 726 | /// Power failure (System V) (synonym: SIGINFO) 727 | Pwd, 728 | } 729 | 730 | /// Convert an OffsetDateTime to ISO string representation 731 | fn datetime_to_iso8601(d: &OffsetDateTime, serializer: S) -> Result 732 | where 733 | S: Serializer, 734 | { 735 | match d.format(&Rfc3339) { 736 | Ok(formatted) => serializer.serialize_str(formatted.as_str()), 737 | Err(e) => Err(SerializeError::custom(format!("{}", e))), 738 | } 739 | } 740 | 741 | /// Convert an ISO DateTime string representation to OffsetDateTime 742 | #[cfg(test)] 743 | fn iso8601_to_datetime<'de, D>(deserializer: D) -> Result 744 | where 745 | D: de::Deserializer<'de>, 746 | { 747 | let timestr = String::deserialize(deserializer)?; 748 | OffsetDateTime::parse(timestr.as_str(), &Rfc3339).map_err(|e| { 749 | de::Error::custom(format!( 750 | "Error deserializing OffsetDateTime from string {} with format {:?}: {}", 751 | timestr, &Rfc3339, e 752 | )) 753 | }) 754 | } 755 | 756 | #[cfg(test)] 757 | mod test { 758 | use super::*; 759 | 760 | #[test] 761 | fn deserialize_timestamp() { 762 | let timestamp_original = OffsetDateTime::now_utc(); 763 | let timestamp_str = timestamp_original.format(&Rfc3339).unwrap(); 764 | let timestamp_rehydrated = OffsetDateTime::parse(×tamp_str, &Rfc3339).unwrap(); 765 | assert_eq!(timestamp_original, timestamp_rehydrated); 766 | } 767 | 768 | #[test] 769 | fn ser_de() { 770 | let event_original = Version::V1 { 771 | timestamp: OffsetDateTime::now_utc(), 772 | hostname: None, 773 | event: EventType::ConfigMismatch(ConfigMismatch { 774 | key: "test".to_owned(), 775 | expected_value: "this".to_owned(), 776 | observed_value: "that".to_owned(), 777 | }), 778 | }; 779 | 780 | let jstr = serde_json::to_string(&event_original).unwrap(); 781 | 782 | let event_rehydrated: Version = serde_json::from_str(jstr.as_str()).unwrap(); 783 | 784 | assert_eq!(event_original, event_rehydrated); 785 | } 786 | } 787 | 
-------------------------------------------------------------------------------- /src/formatter/cef.rs: -------------------------------------------------------------------------------- 1 | use crate::events; 2 | use crate::formatter::FormatResult; 3 | use rust_cef::ToCef; 4 | 5 | pub struct CefFormatter {} 6 | impl CefFormatter { 7 | pub fn format(&self, event: &events::Version) -> FormatResult { 8 | Ok(event.to_cef()?) 9 | } 10 | } 11 | 12 | /**********************************************************************************/ 13 | // Tests! Tests! Tests! 14 | 15 | #[cfg(test)] 16 | mod test { 17 | use super::*; 18 | use std::collections::BTreeMap; 19 | use time::OffsetDateTime; 20 | 21 | #[test] 22 | fn test_linux_kernel_trap() { 23 | let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap(); 24 | 25 | let event1 = events::Version::V1 { 26 | timestamp, 27 | hostname: Some("hostnamecef".to_owned()), 28 | event: events::EventType::LinuxKernelTrap(events::LinuxKernelTrap { 29 | facility: rmesg::entry::LogFacility::Kern, 30 | level: rmesg::entry::LogLevel::Warning, 31 | trap: events::KernelTrapType::Segfault { location: 0 }, 32 | procname: String::from("a.out"), 33 | pid: 36275, 34 | ip: 0x0, 35 | sp: 0x00007ffd5833d0c0, 36 | errcode: events::SegfaultErrorCode { 37 | reason: events::SegfaultReason::NoPageFound, 38 | access_type: events::SegfaultAccessType::Read, 39 | access_mode: events::SegfaultAccessMode::User, 40 | use_of_reserved_bit: false, 41 | instruction_fetch: false, 42 | protection_keys_block_access: false, 43 | }, 44 | file: Some(String::from("a.out")), 45 | vmastart: Some(0x561bc8d8f000), 46 | vmasize: Some(0x1000), 47 | }), 48 | }; 49 | 50 | let formatter = CefFormatter {}; 51 | 52 | assert_eq!( 53 | formatter.format(&event1).unwrap(), 54 | "CEF:0|polyverse|zerotect|1.0|LinuxKernelTrap|Linux Kernel Trap|10|PolyverseZerotectInstructionPointerValue=0 PolyverseZerotectStackPointerValue=140726083244224 cn2=94677333766144 
    #[test]
    fn test_linux_fatal_signal() {
        // Fixed timestamp: 471804323000000 ns == 471804323 ms, which is the
        // `rt` value in the expected CEF output below.
        let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap();

        let event1 = events::Version::V1 {
            timestamp,
            // No hostname => no `dhost` extension expected in the output.
            hostname: None,
            event: events::EventType::LinuxFatalSignal(events::LinuxFatalSignal {
                facility: rmesg::entry::LogFacility::Kern,
                level: rmesg::entry::LogLevel::Warning,
                signal: events::FatalSignalType::Segv,
                // Empty dump: stack_dump fields are deliberately excluded from CEF.
                stack_dump: BTreeMap::new(),
            }),
        };

        let formatter = CefFormatter {};

        assert_eq!(
            formatter.format(&event1).unwrap(),
            "CEF:0|polyverse|zerotect|1.0|LinuxFatalSignal|Linux Fatal Signal|10|flexString2=Segv flexString2Label=signal rt=471804323"
        );
    }

    #[test]
    fn test_linux_suppressed_callback() {
        let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap();

        let event1 = events::Version::V1 {
            timestamp,
            hostname: Some("hostnamecef".to_owned()),
            event: events::EventType::LinuxSuppressedCallback(events::LinuxSuppressedCallback {
                facility: rmesg::entry::LogFacility::Kern,
                level: rmesg::entry::LogLevel::Warning,
                function_name: "show_signal_msg".to_owned(),
                count: 9,
            }),
        };

        let formatter = CefFormatter {};
        // Extension keys appear in alphabetical order (cnt, dhost, flexString1, ...).
        assert_eq!(formatter.format(&event1).unwrap(), "CEF:0|polyverse|zerotect|1.0|LinuxSuppressedCallback|Linux kernel suppressed repetitive log entries|3|cnt=9 dhost=hostnamecef flexString1=show_signal_msg flexString1Label=function_name rt=471804323");
    }
    #[test]
    fn test_zerotect_register_probe() {
        // Fixed timestamp: 471804323000000 ns == 471804323 ms == `rt` below.
        let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap();

        let event1 = events::Version::V1 {
            timestamp,
            hostname: Some("hostnamecef".to_owned()),
            event: events::EventType::RegisterProbe(events::RegisterProbe {
                register: "RIP".to_owned(),
                message: "Instruction pointer".to_owned(),
                procname: "nginx".to_owned(),
                // Empty justification list => cn1 (justifying_event_count) is 0.
                justification: events::RegisterProbeJustification::FullEvents(vec![]),
            }),
        };

        let formatter = CefFormatter {};

        assert_eq!(formatter.format(&event1).unwrap(), "CEF:0|polyverse|zerotect|1.0|RegisterProbe|Probe using Register Increment|10|cn1=0 cn1Label=justifying_event_count cs1=RIP cs1Label=register dhost=hostnamecef dproc=nginx msg=Instruction pointer rt=471804323");
    }
Result as FmtResult}; 4 | 5 | #[derive(Debug)] 6 | pub enum FormatError { 7 | JsonError(String), 8 | CefConversionError(String), 9 | } 10 | impl Error for FormatError {} 11 | impl Display for FormatError { 12 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 13 | match &self { 14 | FormatError::JsonError(s) => write!(f, "FormatError::JsonError: {}", s), 15 | FormatError::CefConversionError(s) => { 16 | write!(f, "FormatError::CefConversionError: {}", s) 17 | } 18 | } 19 | } 20 | } 21 | impl From for FormatError { 22 | fn from(value: serde_json::Error) -> FormatError { 23 | FormatError::JsonError(format!("{}", value)) 24 | } 25 | } 26 | impl From for FormatError { 27 | fn from(value: rust_cef::CefConversionError) -> FormatError { 28 | FormatError::CefConversionError(format!("{}", value)) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/formatter/json.rs: -------------------------------------------------------------------------------- 1 | use crate::events; 2 | use crate::formatter::FormatResult; 3 | 4 | pub struct JsonFormatter {} 5 | impl JsonFormatter { 6 | pub fn format(&self, event: &events::Version) -> FormatResult { 7 | Ok(serde_json::to_string(&event)?) 8 | } 9 | } 10 | 11 | /**********************************************************************************/ 12 | // Tests! Tests! Tests! 
    #[test]
    fn test_linux_kernel_trap() {
        // Fixed timestamp so the serialized "timestamp" field is deterministic.
        let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap();

        let event1 = events::Version::V1 {
            timestamp,
            hostname: Some("hostnamejson".to_owned()),
            event: events::EventType::LinuxKernelTrap(events::LinuxKernelTrap {
                facility: rmesg::entry::LogFacility::Kern,
                level: rmesg::entry::LogLevel::Warning,
                trap: events::KernelTrapType::Segfault { location: 0 },
                procname: String::from("a.out"),
                pid: 36275,
                ip: 0x0,
                sp: 0x00007ffd5833d0c0,
                errcode: events::SegfaultErrorCode {
                    reason: events::SegfaultReason::NoPageFound,
                    access_type: events::SegfaultAccessType::Read,
                    access_mode: events::SegfaultAccessMode::User,
                    use_of_reserved_bit: false,
                    instruction_fetch: false,
                    protection_keys_block_access: false,
                },
                file: Some(String::from("a.out")),
                vmastart: Some(0x561bc8d8f000),
                vmasize: Some(0x1000),
            }),
        };

        let formatter = JsonFormatter {};

        // Expected JSON: fields follow struct declaration order; the hex
        // inputs above (sp, vmastart, vmasize) appear as decimal integers.
        assert_eq!(
            formatter.format(&event1).unwrap(),
            "{\"version\":\"V1\",\"timestamp\":\"1970-01-06T11:03:24.323Z\",\"hostname\":\"hostnamejson\",\"event\":{\"type\":\"LinuxKernelTrap\",\"level\":\"Warning\",\"facility\":\"Kern\",\"trap\":{\"type\":\"Segfault\",\"location\":0},\"procname\":\"a.out\",\"pid\":36275,\"ip\":0,\"sp\":140726083244224,\"errcode\":{\"reason\":\"NoPageFound\",\"access_type\":\"Read\",\"access_mode\":\"User\",\"use_of_reserved_bit\":false,\"instruction_fetch\":false,\"protection_keys_block_access\":false},\"file\":\"a.out\",\"vmastart\":94677333766144,\"vmasize\":4096}}"
        );
    }
    #[test]
    fn test_linux_suppressed_callback() {
        // Fixed timestamp so the serialized "timestamp" field is deterministic.
        let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap();

        let event1 = events::Version::V1 {
            timestamp,
            // None hostname: note the expected JSON below has no "hostname"
            // key at all (field is skipped when absent).
            hostname: None,
            event: events::EventType::LinuxSuppressedCallback(events::LinuxSuppressedCallback {
                facility: rmesg::entry::LogFacility::Kern,
                level: rmesg::entry::LogLevel::Warning,
                function_name: "show_signal_msg".to_owned(),
                count: 9,
            }),
        };

        let formatter = JsonFormatter {};

        assert_eq!(formatter.format(&event1).unwrap(), "{\"version\":\"V1\",\"timestamp\":\"1970-01-06T11:03:24.323Z\",\"event\":{\"type\":\"LinuxSuppressedCallback\",\"level\":\"Warning\",\"facility\":\"Kern\",\"function_name\":\"show_signal_msg\",\"count\":9}}");
    }
let formatter = JsonFormatter {}; 115 | 116 | assert_eq!(formatter.format(&event1).unwrap(), "{\"version\":\"V1\",\"timestamp\":\"1970-01-06T11:03:24.323Z\",\"hostname\":\"hostnamejson\",\"event\":{\"type\":\"ConfigMismatch\",\"key\":\"/sys/module/printk/parameters/time\",\"expected_value\":\"Y\",\"observed_value\":\"N\"}}"); 117 | } 118 | 119 | #[test] 120 | fn test_zerotect_register_probe() { 121 | let timestamp = OffsetDateTime::from_unix_timestamp_nanos(471804323000000).unwrap(); 122 | 123 | let event1 = events::Version::V1 { 124 | timestamp, 125 | hostname: Some("hostnamejson".to_owned()), 126 | event: events::EventType::RegisterProbe(events::RegisterProbe { 127 | register: "RIP".to_owned(), 128 | message: "Instruction pointer".to_owned(), 129 | procname: "nginx".to_owned(), 130 | justification: events::RegisterProbeJustification::FullEvents(vec![]), 131 | }), 132 | }; 133 | 134 | let formatter = JsonFormatter {}; 135 | 136 | assert_eq!(formatter.format(&event1).unwrap(), "{\"version\":\"V1\",\"timestamp\":\"1970-01-06T11:03:24.323Z\",\"hostname\":\"hostnamejson\",\"event\":{\"type\":\"RegisterProbe\",\"register\":\"RIP\",\"message\":\"Instruction pointer\",\"procname\":\"nginx\",\"justification\":{\"FullEvents\":[]}}}"); 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /src/formatter/mod.rs: -------------------------------------------------------------------------------- 1 | mod cef; 2 | mod error; 3 | mod json; 4 | mod text; 5 | 6 | use crate::events::Version; 7 | use crate::params::OutputFormat; 8 | pub use cef::CefFormatter; 9 | pub use error::FormatError; 10 | pub use json::JsonFormatter; 11 | pub use text::TextFormatter; 12 | 13 | pub type FormatResult = Result; 14 | 15 | pub enum Formatter { 16 | Cef(CefFormatter), 17 | Json(JsonFormatter), 18 | Text(TextFormatter), 19 | } 20 | impl Formatter { 21 | pub fn format(&self, value: &Version) -> FormatResult { 22 | match self { 23 | Self::Cef(f) => 
f.format(value), 24 | Self::Json(f) => f.format(value), 25 | Self::Text(f) => f.format(value), 26 | } 27 | } 28 | } 29 | 30 | pub fn new(format: &OutputFormat) -> Formatter { 31 | match format { 32 | OutputFormat::Text => Formatter::Text(TextFormatter {}), 33 | OutputFormat::Json => Formatter::Json(JsonFormatter {}), 34 | OutputFormat::Cef => Formatter::Cef(CefFormatter {}), 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/formatter/text.rs: -------------------------------------------------------------------------------- 1 | use crate::events::Version; 2 | use crate::formatter::FormatResult; 3 | 4 | pub struct TextFormatter {} 5 | impl TextFormatter { 6 | pub fn format(&self, event: &Version) -> FormatResult { 7 | Ok(format!("{}", event)) 8 | } 9 | } 10 | 11 | /**********************************************************************************/ 12 | // NO Tests! NO Tests! NO Tests! 13 | // Text formatting is the Display trait. We make no guarantees on stability 14 | // of text formatting. They are not meant to be parsable. Use JSON if you want 15 | // backwards compatible, well-defined serializations that don't break arbitrarily. 
16 | /**********************************************************************************/ 17 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | mod analyzer; 4 | mod common; 5 | mod emitter; 6 | mod events; 7 | mod formatter; 8 | mod params; 9 | mod raw_event_stream; 10 | mod system; 11 | 12 | use std::error::Error; 13 | use std::fmt::{Display, Formatter, Result as FmtResult}; 14 | use std::process; 15 | use std::time::Duration; 16 | use tokio_stream::StreamExt; 17 | 18 | #[derive(Debug)] 19 | pub struct MainError(String); 20 | impl Error for MainError {} 21 | impl Display for MainError { 22 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 23 | write!(f, "MainError:: {}", self.0) 24 | } 25 | } 26 | impl From for MainError { 27 | fn from(err: system::SystemConfigError) -> Self { 28 | Self(format!("Inner system::SystemConfigError :: {}", err)) 29 | } 30 | } 31 | impl From for MainError { 32 | fn from(err: raw_event_stream::RawEventStreamError) -> Self { 33 | Self(format!( 34 | "Inner raw_event_stream::RawEventStreamError :: {}", 35 | err 36 | )) 37 | } 38 | } 39 | 40 | #[tokio::main(flavor = "current_thread")] 41 | async fn main() -> Result<(), Box> { 42 | if let Err(e) = system::ensure_linux() { 43 | eprintln!( 44 | "Error ensuring the operating system we're running on is Linux: {}", 45 | e 46 | ); 47 | process::exit(1); 48 | } 49 | 50 | let zerotect_config = match params::parse_args(None) { 51 | Ok(pc) => pc, 52 | Err(e) => match e.inner_error { 53 | params::InnerError::ClapError(ce) => ce.exit(), 54 | _ => { 55 | eprintln!("Error when parsing configuration parameters (whether from CLI or from config file): {}", e); 56 | process::exit(1); 57 | } 58 | }, 59 | }; 60 | 61 | let auto_configure_env = zerotect_config.auto_configure; 62 | let chostname = zerotect_config.hostname.clone(); 63 
| // ensure environment is kept stable every 5 minutes (in case something or someone disables the settings) 64 | let mut config_events_stream = 65 | system::EnvironmentConfigurator::create_environment_configrator_stream( 66 | auto_configure_env, 67 | chostname, 68 | ); 69 | 70 | // enforce config before we create raw event stream, 71 | // since the config affects how it works 72 | if let Err(e) = config_events_stream.enforce_config() { 73 | panic!("Error in Environment Configurator. Panicking. {}", e); 74 | } 75 | 76 | let resc = raw_event_stream::RawEventStreamConfig { 77 | verbosity: zerotect_config.verbosity, 78 | hostname: zerotect_config.hostname.clone(), 79 | gobble_old_events: zerotect_config.monitor.gobble_old_events, 80 | flush_timeout: Duration::from_secs(1), 81 | }; 82 | let os_event_stream = 83 | raw_event_stream::RawEventStream::::create_raw_event_stream(resc) 84 | .await?; 85 | 86 | // get a unified stream of all incoming events... 87 | let merged_events_stream = os_event_stream.merge(config_events_stream); 88 | 89 | let analyzed_stream = Box::pin( 90 | analyzer::Analyzer::analyzer_over_stream( 91 | zerotect_config.verbosity, 92 | zerotect_config.analytics, 93 | merged_events_stream, 94 | ) 95 | .await?, 96 | ); 97 | 98 | // split these up before a move 99 | let ec = emitter::EmitterConfig { 100 | verbosity: zerotect_config.verbosity, 101 | console: zerotect_config.console, 102 | polycorder: zerotect_config.polycorder, 103 | syslog: zerotect_config.syslog, 104 | logfile: zerotect_config.logfile, 105 | pagerduty_routing_key: zerotect_config.pagerduty_routing_key, 106 | }; 107 | 108 | emitter::emit_forever(ec, analyzed_stream, zerotect_config.hostname).await?; 109 | 110 | Ok(()) 111 | } 112 | -------------------------------------------------------------------------------- /src/system.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | use crate::events; 4 | use 
crate::params; 5 | use core::future::Future; 6 | use core::pin::Pin; 7 | use futures::stream::Stream; 8 | use futures::task::{Context, Poll}; 9 | use std::error::Error; 10 | use std::fmt::{Display, Formatter, Result as FmtResult}; 11 | use std::fs; 12 | use std::io; 13 | use std::num; 14 | use std::ops::Sub; 15 | use std::rc::Rc; 16 | use std::str; 17 | use sys_info::os_type; 18 | use sysctl::Sysctl; 19 | use tokio::time::{sleep, Sleep}; 20 | 21 | use std::time::Duration; 22 | use time::OffsetDateTime; 23 | 24 | pub const PRINT_FATAL_SIGNALS_CTLNAME: &str = "kernel.print-fatal-signals"; 25 | pub const EXCEPTION_TRACE_CTLNAME: &str = "debug.exception-trace"; 26 | pub const KLOG_INCLUDE_TIMESTAMP: &str = "klog.include-timestamp"; 27 | pub const PROC_UPTIME: &str = "/proc/uptime"; 28 | 29 | #[derive(Debug)] 30 | pub struct SystemConfigError(String); 31 | impl Error for SystemConfigError {} 32 | impl Display for SystemConfigError { 33 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 34 | write!(f, "SystemConfigError:: {}", self.0) 35 | } 36 | } 37 | impl From for SystemConfigError { 38 | fn from(err: sysctl::SysctlError) -> Self { 39 | Self(format!("Inner sysctl::SysctlError :: {}", err)) 40 | } 41 | } 42 | impl From for SystemConfigError { 43 | fn from(err: rmesg::error::RMesgError) -> Self { 44 | Self(format!("Inner rmesg::error::RMesgError :: {}", err)) 45 | } 46 | } 47 | impl From for SystemConfigError { 48 | fn from(err: io::Error) -> Self { 49 | Self(format!("Inner io::Error :: {}", err)) 50 | } 51 | } 52 | impl From for SystemConfigError { 53 | fn from(err: str::Utf8Error) -> Self { 54 | Self(format!("Inner str::Utf8Error :: {}", err)) 55 | } 56 | } 57 | impl From for SystemConfigError { 58 | fn from(err: num::ParseFloatError) -> Self { 59 | Self(format!("Inner num::ParseFloatError :: {}", err)) 60 | } 61 | } 62 | impl From for SystemConfigError { 63 | fn from(err: sys_info::Error) -> Self { 64 | Self(format!("Inner sys_info::Error :: {}", err)) 65 | } 66 | } 
67 | 68 | pub struct EnvironmentConfigurator { 69 | auto_config: params::AutoConfigure, 70 | sleep_interval: Duration, 71 | hostname: Option, 72 | change_events: Vec, 73 | sleep_future: Option>>, 74 | } 75 | impl EnvironmentConfigurator { 76 | pub fn create_environment_configrator_stream( 77 | auto_config: params::AutoConfigure, 78 | hostname: Option, 79 | ) -> Self { 80 | Self { 81 | auto_config, 82 | sleep_interval: Duration::from_secs(300), 83 | hostname, 84 | change_events: Vec::new(), 85 | sleep_future: None, 86 | } 87 | } 88 | 89 | pub fn enforce_config(&mut self) -> Result<(), SystemConfigError> { 90 | // if not sleeping.. reinforce the system with config 91 | let events = modify_environment(&self.auto_config, &self.hostname)?; 92 | for event in events.into_iter() { 93 | eprintln!("Configuration modified. {}", &event); 94 | self.change_events.push(event); 95 | } 96 | 97 | Ok(()) 98 | } 99 | } 100 | impl Stream for EnvironmentConfigurator { 101 | type Item = events::Event; 102 | 103 | fn poll_next( 104 | mut self: Pin<&mut Self>, 105 | cx: &mut Context<'_>, 106 | ) -> Poll::Item>> { 107 | // if already sleeping... handle that. 108 | if let Some(mut sf) = self.sleep_future.take() { 109 | match Future::poll(sf.as_mut(), cx) { 110 | // still sleeping? Go back to sleep. 111 | Poll::Pending => { 112 | // put the future back in 113 | self.sleep_future = Some(sf); 114 | return Poll::Pending; 115 | } 116 | 117 | // Not sleeping? continue... 118 | Poll::Ready(()) => {} 119 | } 120 | } 121 | 122 | // enforce configuration 123 | if let Err(e) = self.enforce_config() { 124 | panic!("Error in Environment Configurator. Panicking. {}", e); 125 | } 126 | 127 | // entries empty? then go to sleep... 
128 | if self.change_events.is_empty() { 129 | let sf = sleep(self.sleep_interval); 130 | let mut pinned_sf = Box::pin(sf); 131 | match Future::poll(pinned_sf.as_mut(), cx) { 132 | Poll::Pending => { 133 | self.sleep_future = Some(pinned_sf); 134 | return Poll::Pending; 135 | } 136 | Poll::Ready(_) => { 137 | eprintln!("Sleep future did not return Poll::Pending as expected despite being asked to sleep for {:?}", self.sleep_interval); 138 | return Poll::Pending; 139 | } 140 | } 141 | } 142 | 143 | Poll::Ready(Some(self.change_events.remove(0))) 144 | } 145 | } 146 | 147 | pub fn system_start_time() -> Result { 148 | let system_uptime_nanos: u64 = (system_uptime_secs()? * 1000000000.0) as u64; 149 | Ok(OffsetDateTime::now_utc().sub(Duration::from_nanos(system_uptime_nanos))) 150 | } 151 | 152 | // https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-uptime 153 | pub fn system_uptime_secs() -> Result { 154 | let contentsu8 = fs::read(PROC_UPTIME)?; 155 | let contents = str::from_utf8(&contentsu8)?; 156 | match contents.split_whitespace().next() { 157 | None => Err(SystemConfigError(format!("Contents of the file {} not what was expected. Unable to parse the first number from: {}", PROC_UPTIME, contents))), 158 | Some(numstr) => Ok(numstr.trim().parse::()?), 159 | } 160 | } 161 | 162 | pub fn ensure_linux() -> Result<(), SystemConfigError> { 163 | let osname = os_type()?; 164 | if osname != "Linux" { 165 | return Err(SystemConfigError(format!("The Operating System detected is {} and not supported. 
This program modifies operating system settings in funamental ways and thus fails safely when it is not supported.", osname))); 166 | } 167 | Ok(()) 168 | } 169 | 170 | pub fn modify_environment( 171 | auto_configure: ¶ms::AutoConfigure, 172 | hostname: &Option, 173 | ) -> Result, SystemConfigError> { 174 | let mut env_events = Vec::::new(); 175 | 176 | eprintln!("Configuring kernel paramters as requested..."); 177 | if auto_configure.exception_trace { 178 | let maybe_event = ensure_systemctl( 179 | hostname, 180 | EXCEPTION_TRACE_CTLNAME, 181 | bool_to_0_1_string(auto_configure.exception_trace), 182 | )?; 183 | if let Some(event) = maybe_event { 184 | env_events.push(event); 185 | } 186 | } 187 | 188 | if auto_configure.fatal_signals { 189 | let maybe_event = ensure_systemctl( 190 | hostname, 191 | PRINT_FATAL_SIGNALS_CTLNAME, 192 | bool_to_0_1_string(auto_configure.fatal_signals), 193 | )?; 194 | if let Some(event) = maybe_event { 195 | env_events.push(event); 196 | } 197 | } 198 | 199 | if auto_configure.klog_include_timestamp && !rmesg::klogctl::klog_timestamps_enabled()? 
{ 200 | rmesg::klogctl::klog_timestamps_enable(true)?; 201 | 202 | env_events.push(Rc::new(events::Version::V1 { 203 | timestamp: OffsetDateTime::now_utc(), 204 | hostname: hostname.clone(), 205 | event: events::EventType::ConfigMismatch(events::ConfigMismatch { 206 | key: rmesg::klogctl::SYS_MODULE_PRINTK_PARAMETERS_TIME.to_owned(), 207 | expected_value: "Y".to_owned(), 208 | observed_value: "N".to_owned(), 209 | }), 210 | })); 211 | } 212 | 213 | Ok(env_events) 214 | } 215 | 216 | fn ensure_systemctl( 217 | hostname: &Option, 218 | ctlstr: &str, 219 | valuestr: &str, 220 | ) -> Result, SystemConfigError> { 221 | eprintln!("==> Ensuring {} is set to {}", ctlstr, valuestr); 222 | 223 | let ctl = sysctl::Ctl::new(ctlstr)?; 224 | let prev_value_str = ctl.value_string()?; 225 | 226 | if prev_value_str.trim() == valuestr.trim() { 227 | eprintln!("====> Already enabled, not reenabling: {}", ctlstr); 228 | Ok(None) 229 | } else { 230 | ctl.set_value_string(valuestr)?; 231 | Ok(Some(Rc::new(events::Version::V1 { 232 | timestamp: OffsetDateTime::now_utc(), 233 | hostname: hostname.clone(), 234 | event: events::EventType::ConfigMismatch(events::ConfigMismatch { 235 | key: ctlstr.to_owned(), 236 | expected_value: valuestr.to_owned(), 237 | observed_value: prev_value_str, 238 | }), 239 | }))) 240 | } 241 | } 242 | 243 | fn bool_to_0_1_string(b: bool) -> &'static str { 244 | match b { 245 | false => "0\n", 246 | true => "1\n", 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /usecase/datafaulter.c: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019 Polyverse Corporation 2 | 3 | #include 4 | int main(int argc, char **argv) { 5 | int *p = 0; 6 | return printf("%d\n", *p); 7 | } -------------------------------------------------------------------------------- /usecase/instrfaulter.c: -------------------------------------------------------------------------------- 1 | // 
Copyright (c) 2019 Polyverse Corporation 2 | 3 | int main(int argc, char **argv) { 4 | int (*functionPtr)() = 0; 5 | functionPtr(); 6 | } -------------------------------------------------------------------------------- /usecase/invalidopcode.c: -------------------------------------------------------------------------------- 1 | 2 | // Copyright (c) 2019 Polyverse Corporation 3 | 4 | int main() { 5 | char *data = "hello world random data"; 6 | void (*funcptr)() = (void*)data; 7 | // try to execute stuff that's not an instruction 8 | funcptr(); 9 | } 10 | -------------------------------------------------------------------------------- /usecase/segfault_at_location.c: -------------------------------------------------------------------------------- 1 | 2 | // Copyright (c) 2019 Polyverse Corporation 3 | 4 | void function1() { 5 | int collector = 5, i = 0; 6 | if (10) { 7 | for (i=0; i < 100; i++) { 8 | collector+=i; 9 | } 10 | } else { 11 | for (i=0; i > 35; i--) { 12 | collector-=i; 13 | } 14 | } 15 | } 16 | 17 | int main(int argc, char **argv) { 18 | void (*functionptr)() = function1; 19 | functionptr++; // go to an invalid place 20 | functionptr(); 21 | } 22 | --------------------------------------------------------------------------------