├── .editorconfig ├── .gitignore ├── .pre-commit-config.yaml ├── .secrets.baseline ├── .travis.yml ├── Dockerfile-test ├── LICENSE ├── Makefile ├── README.md ├── changelog.md ├── config.yaml.example ├── docker-compose.yml ├── docs ├── Makefile └── source │ ├── _static │ └── .gitkeep │ ├── conf.py │ ├── elastalert.rst │ ├── elastalert_status.rst │ ├── index.rst │ ├── recipes │ ├── adding_alerts.rst │ ├── adding_enhancements.rst │ ├── adding_loaders.rst │ ├── adding_rules.rst │ ├── signing_requests.rst │ └── writing_filters.rst │ ├── ruletypes.rst │ └── running_elastalert.rst ├── elastalert ├── __init__.py ├── alerts.py ├── auth.py ├── config.py ├── create_index.py ├── elastalert.py ├── enhancements.py ├── es_mappings │ ├── 5 │ │ ├── elastalert.json │ │ ├── elastalert_error.json │ │ ├── elastalert_status.json │ │ ├── past_elastalert.json │ │ └── silence.json │ └── 6 │ │ ├── elastalert.json │ │ ├── elastalert_error.json │ │ ├── elastalert_status.json │ │ ├── past_elastalert.json │ │ └── silence.json ├── kibana.py ├── kibana_discover.py ├── loaders.py ├── opsgenie.py ├── rule_from_kibana.py ├── ruletypes.py ├── schema.yaml ├── test_rule.py ├── util.py └── zabbix.py ├── example_rules ├── example_cardinality.yaml ├── example_change.yaml ├── example_frequency.yaml ├── example_new_term.yaml ├── example_opsgenie_frequency.yaml ├── example_percentage_match.yaml ├── example_single_metric_agg.yaml ├── example_spike.yaml ├── example_spike_single_metric_agg.yaml ├── jira_acct.txt ├── ssh-repeat-offender.yaml └── ssh.yaml ├── pytest.ini ├── requirements-dev.txt ├── requirements.txt ├── setup.cfg ├── setup.py ├── supervisord.conf.example ├── tests ├── __init__.py ├── alerts_test.py ├── auth_test.py ├── base_test.py ├── conftest.py ├── create_index_test.py ├── elasticsearch_test.py ├── kibana_discover_test.py ├── kibana_test.py ├── loaders_test.py ├── rules_test.py └── util_test.py └── tox.ini /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | charset = utf-8 7 | 8 | [*.py] 9 | indent_style = space 10 | indent_size = 4 11 | 12 | [Makefile] 13 | indent_style = tab 14 | 15 | [{*.json,*.yml,*.yaml}] 16 | indent_style = space 17 | indent_size = 2 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config.yaml 2 | .tox/ 3 | .coverage 4 | .idea/* 5 | .cache/ 6 | __pycache__/ 7 | *.pyc 8 | virtualenv_run/ 9 | *.egg-info/ 10 | dist/ 11 | venv/ 12 | env/ 13 | docs/build/ 14 | build/ 15 | .pytest_cache/ 16 | my_rules 17 | *.swp 18 | *~ 19 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: git://github.com/pre-commit/pre-commit-hooks 3 | sha: v1.1.1 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: autopep8-wrapper 8 | args: 9 | - -i 10 | - --ignore=E265,E309,E501 11 | - id: flake8 12 | - id: check-yaml 13 | - id: debug-statements 14 | - id: requirements-txt-fixer 15 | - id: name-tests-test 16 | - repo: git://github.com/asottile/reorder_python_imports 17 | sha: v0.3.5 18 | hooks: 19 | - id: reorder-python-imports 20 | - repo: git://github.com/Yelp/detect-secrets 21 | sha: 0.9.1 22 | hooks: 23 | - id: detect-secrets 24 | args: ['--baseline', '.secrets.baseline'] 25 | 
exclude: .*tests/.*|.*yelp/testing/.*|\.pre-commit-config\.yaml 26 | -------------------------------------------------------------------------------- /.secrets.baseline: -------------------------------------------------------------------------------- 1 | { 2 | "exclude_regex": ".*tests/.*|.*yelp/testing/.*|\\.pre-commit-config\\.yaml", 3 | "generated_at": "2018-07-06T22:54:22Z", 4 | "plugins_used": [ 5 | { 6 | "base64_limit": 4.5, 7 | "name": "Base64HighEntropyString" 8 | }, 9 | { 10 | "hex_limit": 3, 11 | "name": "HexHighEntropyString" 12 | }, 13 | { 14 | "name": "PrivateKeyDetector" 15 | } 16 | ], 17 | "results": { 18 | ".travis.yml": [ 19 | { 20 | "hashed_secret": "4f7a1ea04dafcbfee994ee1d08857b8aaedf8065", 21 | "line_number": 14, 22 | "type": "Base64 High Entropy String" 23 | } 24 | ] 25 | }, 26 | "version": "0.9.1" 27 | } 28 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - '3.6' 4 | env: 5 | - TOXENV=docs 6 | - TOXENV=py36 7 | install: 8 | - pip install tox 9 | - > 10 | if [[ -n "${ES_VERSION}" ]] ; then 11 | wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}.tar.gz 12 | mkdir elasticsearch-${ES_VERSION} && tar -xzf elasticsearch-${ES_VERSION}.tar.gz -C elasticsearch-${ES_VERSION} --strip-components=1 13 | ./elasticsearch-${ES_VERSION}/bin/elasticsearch & 14 | fi 15 | script: 16 | - > 17 | if [[ -n "${ES_VERSION}" ]] ; then 18 | wget -q --waitretry=1 --retry-connrefused --tries=30 -O - http://127.0.0.1:9200 19 | make test-elasticsearch 20 | else 21 | make test 22 | fi 23 | jobs: 24 | include: 25 | - stage: 'Elasticsearch test' 26 | env: TOXENV=py36 ES_VERSION=7.0.0-linux-x86_64 27 | - env: TOXENV=py36 ES_VERSION=6.6.2 28 | - env: TOXENV=py36 ES_VERSION=6.3.2 29 | - env: TOXENV=py36 ES_VERSION=6.2.4 30 | - env: TOXENV=py36 ES_VERSION=6.0.1 31 | - env: TOXENV=py36 ES_VERSION=5.6.16 32 | 33 | deploy: 34 | provider: pypi 35 | user: yelplabs 36 | password: 37 | secure: TpSTlFu89tciZzboIfitHhU5NhAB1L1/rI35eQTXstiqzYg2mweOuip+MPNx9AlX3Swg7MhaFYnSUvRqPljuoLjLD0EQ7BHLVSBFl92ukkAMTeKvM6LbB9HnGOwzmAvTR5coegk8IHiegudODWvnhIj4hp7/0EA+gVX7E55kEAw= 38 | on: 39 | tags: true 40 | distributions: sdist bdist_wheel 41 | repo: Yelp/elastalert 42 | branch: master 43 | -------------------------------------------------------------------------------- /Dockerfile-test: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | 3 | RUN apt-get update && apt-get upgrade -y 4 | RUN apt-get -y install build-essential python3.6 python3.6-dev python3-pip libssl-dev git 5 | 6 | WORKDIR /home/elastalert 7 | 8 | ADD requirements*.txt ./ 9 | RUN pip3 install -r requirements-dev.txt 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | 204 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all production test docs clean 2 | 3 | all: production 4 | 5 | production: 6 | @true 7 | 8 | docs: 9 | tox -e docs 10 | 11 | dev: $(LOCAL_CONFIG_DIR) $(LOGS_DIR) install-hooks 12 | 13 | install-hooks: 14 | pre-commit install -f --install-hooks 15 | 16 | test: 17 | tox 18 | 19 | test-elasticsearch: 20 | tox -- --runelasticsearch 21 | 22 | test-docker: 23 | docker-compose --project-name elastalert build tox 24 | docker-compose --project-name elastalert run tox 25 | 26 | clean: 27 | make -C docs clean 28 | find . -name '*.pyc' -delete 29 | find . -name '__pycache__' -delete 30 | rm -rf virtualenv_run .tox .coverage *.egg-info build 31 | -------------------------------------------------------------------------------- /changelog.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | # v0.2.4 4 | 5 | ### Added 6 | - Added back customFields support for The Hive 7 | 8 | # v0.2.3 9 | 10 | ### Added 11 | - Added back TheHive alerter without TheHive4py library 12 | 13 | # v0.2.2 14 | 15 | ### Added 16 | - Integration with Kibana Discover app 17 | - Added ability to specify opsgenie alert details 18 | 19 | ### Fixed 20 | - Fix some encoding issues with command alerter 21 | - Better error messages for missing config file 22 | - Fixed an issue with run_every not applying per-rule 23 | - Fixed an issue with rules not being removed 24 | - Fixed an issue with top count keys and nested query keys 25 | - Various documentation fixes 26 | - Fixed an issue with not being able to use spike aggregation 27 | 28 | ### Removed 29 | - Remove The Hive alerter 30 | 31 | # v0.2.1 32 | 33 | ### Fixed 34 | - Fixed an AttributeError introduced in 0.2.0 35 | 36 | # v0.2.0 37 | 38 | - Switched to Python 3 39 | 40 | ### Added 41 | - Add rule loader class for customized rule loading 42 | - Added thread based rules and limit_execution 43 | - Run_every can now be customized per rule 44 | 45 | ### Fixed 46 | - Various small fixes 47 | 48 | # v0.1.39 49 | 50 | ### Added 51 | - Added spike alerts for metric aggregations 52 | - Allow SSL connections for Stomp 53 | - Allow limits on alert text length 54 | - Add optional min doc count for terms queries 55 | - Add ability to index into arrays for alert_text_args, etc 56 | 57 | ### Fixed 58 | - Fixed bug involving --config flag with create-index 59 | - Fixed some settings not being inherited from the config properly 60 | - Some fixes for Hive alerter 61 | - Close SMTP connections properly 62 | - Fix timestamps in Pagerduty v2 payload 63 | - Fixed a bug causing aggregated alerts to get mixed up 64 | 65 | # v0.1.38 66 | 67 | ### Added 68 | - Added PagerTree alerter 69 | - Added Line alerter 70 | - Added more customizable logging 71 | - Added new logic in test-rule to determine the default timeframe 72 | 73 | ### Fixed 74 | - Fixed an issue causing buffer_time to sometimes be ignored 75 | 76 | # v0.1.37 77 | 78
| ### Added 79 | - Added more options for Opsgenie alerter 80 | - Added more pagerduty options 81 | - Added ability to add metadata to elastalert logs 82 | 83 | ### Fixed 84 | - Fixed some documentation to be more clear 85 | - Stop requiring doc_type for metric aggregations 86 | - No longer puts quotes around regex terms in blacklists or whitelists 87 | 88 | # v0.1.36 89 | 90 | ### Added 91 | - Added a prefix "metric_" to the key used for metric aggregations to avoid possible conflicts 92 | - Added option to skip Alerta certificate validation 93 | 94 | ### Fixed 95 | - Fixed a typo in the documentation for spike rule 96 | 97 | # v0.1.35 98 | 99 | ### Fixed 100 | - Fixed an issue preventing new term rule from working with terms query 101 | 102 | # v0.1.34 103 | 104 | ### Added 105 | - Added prefix/suffix support for summary table 106 | - Added support for ignoring SSL validation in Slack 107 | - More visible exceptions during query parse failures 108 | 109 | ### Fixed 110 | - Fixed top_count_keys when using compound query_key 111 | - Fixed num_hits sometimes being reported too low 112 | - Fixed an issue with setting ES_USERNAME via env 113 | - Fixed an issue when using test script with custom timestamps 114 | - Fixed a unicode error when using Telegram 115 | - Fixed an issue with jsonschema version conflict 116 | - Fixed an issue with nested timestamps in cardinality type 117 | 118 | # v0.1.33 119 | 120 | ### Added 121 | - Added ability to pipe alert text to a command 122 | - Add --start and --end support for elastalert-test-rule 123 | - Added ability to turn blacklist/whitelist files into queries for better performance 124 | - Allow setting of OpsGenie priority 125 | - Add ability to query the adjacent index if timestamp_field not used for index timestamping 126 | - Add support for pagerduty v2 127 | - Add option to turn off .raw/.keyword field postfixing in new term rule 128 | - Added --use-downloaded feature for elastalert-test-rule 129 | 130 | ### Fixed 131 | - Fixed a bug that caused num_hits in matches to sometimes be erroneously small 132 | - Fixed an issue with HTTP Post alerter that could cause it to hang indefinitely 133 | - Fixed some issues with string formatting for various alerters 134 | - Fixed a couple of incorrect parts of the documentation 135 | 136 | # v0.1.32 137 | 138 | ### Added 139 | - Add support for setting ES url prefix via environment var 140 | - Add support for using native Slack fields in alerts 141 | 142 | ### Fixed 143 | - Fixed a bug that could cause scrolling queries to sometimes terminate early 144 | 145 | # v0.1.31 146 | 147 | ### Added 148 | - Added ability to add start date to new term rule 149 | 150 | ### Fixed 151 | - Fixed a bug in create_index which would try to delete a nonexistent index 152 | - Apply filters to new term rule all terms query 153 | - Support Elasticsearch 6 for new term rule 154 | - Fixed is_enabled not working on rule changes 155 | 156 | 157 | # v0.1.30 158 | 159 | ### Added 160 | - Alerta alerter 161 | - Added support for transitioning JIRA issues 162 | - Option to recreate index in elastalert-create-index 163 | 164 | ### Fixed 165 | - Update jira_ custom fields before each alert if they were modified 166 | - Use json instead of simplejson 167 | - Allow for relative path for smtp_auth_file 168 | - Fixed some grammar issues 169 | - Better code formatting of index mappings 170 | - Better formatting and size limit for HipChat HTML 171 | - Fixed gif link in readme for kibana plugin 172 | - Fixed elastalert-test-rule with Elasticsearch
> 4 173 | - Added documentation for is_enabled option 174 | 175 | ## v0.1.29 176 | 177 | ### Added 178 | - Added a feature forget_keys to prevent realerting when using flatline with query_key 179 | - Added a new alert_text_type, aggregation_summary_only 180 | 181 | ### Fixed 182 | - Fixed incorrect documentation about es_conn_timeout default 183 | 184 | ## v0.1.28 185 | 186 | ### Added 187 | - Added support for Stride formatting of simple HTML tags 188 | - Added support for custom titles in Opsgenie alerts 189 | - Added a denominator to percentage match based alerts 190 | 191 | ### Fixed 192 | - Fixed a bug with Stomp alerter connections 193 | - Removed escaping of some characters in Slack messages 194 | 195 | ## v0.1.27 196 | 197 | ### Added 198 | - Added support for a value other than <MISSING VALUE> in formatted alerts 199 | 200 | ### Fixed 201 | - Fixed a failed creation of elastalert indices when using Elasticsearch 6 202 | - Truncate Telegram alerts to avoid API errors 203 | 204 | ## v0.1.26 205 | 206 | ### Added 207 | - Added support for Elasticsearch 6 208 | - Added support for mentions in Hipchat 209 | 210 | ### Fixed 211 | - Fixed an issue where a nested field lookup would crash if one of the intermediate fields was null 212 | 213 | ## v0.1.25 214 | 215 | ### Fixed 216 | - Fixed a bug causing new term rule to break unless you passed a start time 217 | - Add a slight clarification on the localhost:9200 reported in es_debug_trace 218 | 219 | ## v0.1.24 220 | 221 | ### Fixed 222 | - Pinned pytest 223 | - create-index reads index name from config.yaml 224 | - top_count_keys now works for context on a flatline rule type 225 | - Fixed JIRA behavior for issues with statuses that have spaces in the name 226 | 227 | ## v0.1.22 228 | 229 | ### Added 230 | - Added Stride alerter 231 | - Allow custom string formatters for aggregation percentage 232 | - Added a field to disable rules from config 233 | - Added support for subaggregations for the metric rule type 234 | 235 | ### Fixed 236 | - Fixed a bug causing create-index to fail if missing config.yaml 237 | - Fixed a bug when using ES5 with query_key and top_count_keys 238 | - Allow enhancements to set and clear arbitrary JIRA fields 239 | - Fixed a bug causing timestamps to be formatted in scientific notation 240 | - Stop attempting to initialize alerters in debug mode 241 | - Changed default alert ordering so that JIRA tickets end up in other alerts 242 | - Fixed a bug when using Stomp alerter with complex query_key 243 | - Fixed a bug preventing hipchat room ID from being an integer 244 | - Fixed a bug causing duplicate alerts when using spike with alert_on_new_data 245 | - Minor fixes to summary table formatting 246 | - Fixed elastalert-test-rule when using new term rule type 247 | 248 | ## v0.1.21 249 | 250 | ### Fixed 251 | - Fixed an incomplete bug fix for preventing duplicate enhancement runs 252 | 253 | ## v0.1.20 254 | 255 | ### Added 256 | - Added support for client TLS keys 257 | 258 | ### Fixed 259 | - Fixed the formatting of summary tables in Slack 260 | - Fixed ES_USE_SSL env variable 261 | - Fixed the unique value count printed by new_term rule type 262 | - Jira alerter no longer uses the non-existent json code formatter 263 | 264 | ## v0.1.19 265 | 266 | ### Added 267 | - Added support for populating JIRA fields via fields in the match 268 | - Added support for using a TLS certificate file for SMTP connections 269 | - Allow a custom suffix for non-analyzed Elasticsearch fields, like ".raw" or ".keyword" 270 | - Added match_time to
Elastalert alert documents in Elasticsearch 271 | 272 | ### Fixed 273 | - Fixed an error in the documentation for rule importing 274 | - Prevent enhancements from re-running on retried alerts 275 | - Fixed a bug when using custom timestamp formats and new term rule 276 | - Lowered jira_bump_after_inactivity default to 0 days 277 | 278 | ## v0.1.18 279 | 280 | ### Added 281 | - Added a new alerter "post" based on "simple" which POSTs JSON to HTTP endpoints 282 | - Added an option jira_bump_after_inactivity to prevent ElastAlert commenting on active JIRA tickets 283 | 284 | ### Removed 285 | - Removed "simple" alerter, replaced by "post" 286 | 287 | ## v0.1.17 288 | 289 | ### Added 290 | - Added a --patience flag to allow Elastalert to wait for Elasticsearch to become available 291 | - Allow custom PagerDuty alert titles via alert_subject 292 | 293 | ## v0.1.16 294 | 295 | ### Fixed 296 | - Fixed a bug where JIRA titles might not use query_key values 297 | - Fixed a bug where flatline alerts don't respect query_key for realert 298 | - Fixed a typo "twilio_accout_sid" 299 | 300 | ### Added 301 | - Added support for env variables in kibana4 dashboard links 302 | - Added ca_certs option for custom CA support 303 | 304 | ## v0.1.15 305 | 306 | ### Fixed 307 | - Fixed a bug where Elastalert would crash on connection error during startup 308 | - Fixed some typos in documentation 309 | - Fixed a bug in metric bucket offset calculation 310 | - Fixed a TypeError in Service Now alerter 311 | 312 | ### Added 313 | - Added support for compound compare key in change rules 314 | - Added support for absolute paths in rule config imports 315 | - Added Microsoft Teams alerter 316 | - Added support for markdown in Slack alerts 317 | - Added error codes to test script 318 | - Added support for lists in email_from_field 319 | 320 | 321 | ## v0.1.14 - 2017-05-11 322 | 323 | ### Fixed 324 | - Twilio alerter uses the from number appropriately 325 | - Fixed a TypeError in SNS alerter 326 | - Some changes to requirements.txt and setup.py 327 | - Fixed a TypeError in new term rule 328 | 329 | ### Added 330 | - Set a custom pagerduty incident key 331 | - Preserve traceback in most exceptions 332 | 333 | ## v0.1.12 - 2017-04-21 334 | 335 | ### Fixed 336 | - Fixed a bug causing filters to be ignored when using Elasticsearch 5 337 | 338 | 339 | ## v0.1.11 - 2017-04-19 340 | 341 | ### Fixed 342 | - Fixed an issue that would cause filters starting with "query" to sometimes throw errors in ES5 343 | - Fixed a bug with multiple versions of ES on different rules 344 | - Fixed a possible KeyError when using use_terms_query with ES5 345 | 346 | ## v0.1.10 - 2017-04-17 347 | 348 | ### Fixed 349 | - Fixed an AttributeError occurring with older versions of the Elasticsearch library 350 | - Made example rules more consistent and gave them unique names 351 | - Fixed an error caused by a typo when es_username is used 352 | 353 | ## v0.1.9 - 2017-04-14 354 | 355 | ### Added 356 | - Added a changelog 357 | - Added metric aggregation rule type 358 | - Added percentage match rule type 359 | - Added default doc style and improved the instructions 360 | - Rule names will default to the filename 361 | - Added import keyword in rules to include sections from other files 362 | - Added email_from_field option to derive the recipient from a field in the match 363 | - Added simple HTTP alerter 364 | - Added Exotel SMS alerter 365 | - Added a readme link to third party Kibana plugin 366 | - Added option to use env variables to configure some settings
367 | - Added duplicate hits count in log line 368 | 369 | ### Fixed 370 | - Fixed a bug in change rule where a boolean false would be ignored 371 | - Clarify documentation on format of alert_text_args and alert_text_kw 372 | - Fixed a bug preventing new silence stashes from being loaded after a rule has previously alerted 373 | - Changed the default es_host in elastalert-test-rule to localhost 374 | - Fixed a bug preventing ES <5.0 formatted queries from working in elastalert-test-rule 375 | - Fixed top_count_keys adding .raw on ES >5.0, uses .keyword instead 376 | - Fixed a bug causing compound aggregation keys not to work 377 | - Better error reporting for the Jira alerter 378 | - AWS request signing now refreshes credentials, uses boto3 379 | - Support multiple ES versions on different rules 380 | - Added documentation for percentage match rule type 381 | 382 | ### Removed 383 | - Removed a feature that would disable writeback_es on errors, causing various issues 384 | -------------------------------------------------------------------------------- /config.yaml.example: -------------------------------------------------------------------------------- 1 | # This is the folder that contains the rule yaml files 2 | # Any .yaml file will be loaded as a rule 3 | rules_folder: example_rules 4 | 5 | # How often ElastAlert will query Elasticsearch 6 | # The unit can be anything from weeks to seconds 7 | run_every: 8 | minutes: 1 9 | 10 | # ElastAlert will buffer results from the most recent 11 | # period of time, in case some log sources are not in real time 12 | buffer_time: 13 | minutes: 15 14 | 15 | # The Elasticsearch hostname for metadata writeback 16 | # Note that every rule can have its own Elasticsearch host 17 | es_host: elasticsearch.example.com 18 | 19 | # The Elasticsearch port 20 | es_port: 9200 21 | 22 | # The AWS region to use. Set this when using AWS-managed elasticsearch 23 | #aws_region: us-east-1 24 | 25 | # The AWS profile to use. Use this if you are using an aws-cli profile. 26 | # See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html 27 | # for details 28 | #profile: test 29 | 30 | # Optional URL prefix for Elasticsearch 31 | #es_url_prefix: elasticsearch 32 | 33 | # Connect with TLS to Elasticsearch 34 | #use_ssl: True 35 | 36 | # Verify TLS certificates 37 | #verify_certs: True 38 | 39 | # GET request with body is the default option for Elasticsearch. 40 | # If it fails for some reason, you can pass 'GET', 'POST' or 'source'.
41 | # See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport 42 | # for details 43 | #es_send_get_body_as: GET 44 | 45 | # Optional basic-auth username and password for Elasticsearch 46 | #es_username: someusername 47 | #es_password: somepassword 48 | 49 | # Use SSL authentication with client certificates. client_cert must be 50 | # a pem file containing both cert and key for client 51 | #verify_certs: True 52 | #ca_certs: /path/to/cacert.pem 53 | #client_cert: /path/to/client_cert.pem 54 | #client_key: /path/to/client_key.key 55 | 56 | # The index on es_host which is used for metadata storage 57 | # This can be an unmapped index, but it is recommended that you run 58 | # elastalert-create-index to set a mapping 59 | writeback_index: elastalert_status 60 | writeback_alias: elastalert_alerts 61 | 62 | # If an alert fails for some reason, ElastAlert will retry 63 | # sending the alert until this time period has elapsed 64 | alert_time_limit: 65 | days: 2 66 | 67 | # Custom logging configuration 68 | # If you want to set up your own logging configuration to log into 69 | # files as well or to Logstash and/or modify log levels, use 70 | # the configuration below and adjust to your needs. 71 | # Note: if you run ElastAlert with --verbose/--debug, the log level of 72 | # the "elastalert" logger is changed to INFO, if not already INFO/DEBUG. 73 | #logging: 74 | # version: 1 75 | # incremental: false 76 | # disable_existing_loggers: false 77 | # formatters: 78 | # logline: 79 | # format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s' 80 | # 81 | # handlers: 82 | # console: 83 | # class: logging.StreamHandler 84 | # formatter: logline 85 | # level: DEBUG 86 | # stream: ext://sys.stderr 87 | # 88 | # file: 89 | # class: logging.FileHandler 90 | # formatter: logline 91 | # level: DEBUG 92 | # filename: elastalert.log 93 | # 94 | # loggers: 95 | # elastalert: 96 | # level: WARN 97 | # handlers: [] 98 | # propagate: true 99 | # 100 | # elasticsearch: 101 | # level: WARN 102 | # handlers: [] 103 | # propagate: true 104 | # 105 | # elasticsearch.trace: 106 | # level: WARN 107 | # handlers: [] 108 | # propagate: true 109 | # 110 | # '': # root logger 111 | # level: WARN 112 | # handlers: 113 | # - console 114 | # - file 115 | # propagate: false 116 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | tox: 4 | build: 5 | context: ./ 6 | dockerfile: Dockerfile-test 7 | command: tox 8 | container_name: elastalert_tox 9 | working_dir: /home/elastalert 10 | volumes: 11 | - ./:/home/elastalert/ 12 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 14 | 15 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make <target>' where <target> is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " pickle to make pickle files" 22 | @echo " json to make JSON files" 23 | @echo " htmlhelp to make HTML files and an HTML help project" 24 | @echo " qthelp to make HTML files and a qthelp project" 25 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 26 | @echo " changes to make an overview of all changed/added/deprecated items" 27 | @echo " linkcheck to check all external links for integrity" 28 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 29 | 30 | clean: 31 | -rm -rf $(BUILDDIR)/* 32 | 33 | html: 34 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 35 | @echo 36 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 37 | 38 | dirhtml: 39 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 40 | @echo 41 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 42 | 43 | pickle: 44 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 45 | @echo 46 | @echo "Build finished; now you can process the pickle files." 47 | 48 | json: 49 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 50 | @echo 51 | @echo "Build finished; now you can process the JSON files." 52 | 53 | htmlhelp: 54 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 55 | @echo 56 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 57 | ".hhp project file in $(BUILDDIR)/htmlhelp." 58 | 59 | qthelp: 60 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 61 | @echo 62 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 63 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 64 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/monitor.qhcp" 65 | @echo "To view the help file:" 66 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/monitor.qhc" 67 | 68 | latex: 69 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 70 | @echo 71 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 72 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 73 | "run these through (pdf)latex." 74 | 75 | changes: 76 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 77 | @echo 78 | @echo "The overview file is in $(BUILDDIR)/changes." 79 | 80 | linkcheck: 81 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 82 | @echo 83 | @echo "Link check complete; look for any errors in the above output " \ 84 | "or in $(BUILDDIR)/linkcheck/output.txt." 85 | 86 | doctest: 87 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 88 | @echo "Testing of doctests in the sources finished, look at the " \ 89 | "results in $(BUILDDIR)/doctest/output.txt."
90 | -------------------------------------------------------------------------------- /docs/source/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yelp/elastalert/e0bbcb5b71e9fdb4a750c3871d365a882ff17b16/docs/source/_static/.gitkeep -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | import sphinx_rtd_theme 2 | 3 | # -*- coding: utf-8 -*- 4 | # 5 | # ElastAlert documentation build configuration file, created by 6 | # sphinx-quickstart on Thu Jul 11 15:45:31 2013. 7 | # 8 | # This file is execfile()d with the current directory set to its containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # sys.path.append(os.path.abspath('.')) 19 | # -- General configuration ----------------------------------------------------- 20 | # Add any Sphinx extension module names here, as strings. They can be extensions 21 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 22 | extensions = [] 23 | 24 | # Add any paths that contain templates here, relative to this directory. 25 | templates_path = ['_templates'] 26 | 27 | # The suffix of source filenames. 28 | source_suffix = '.rst' 29 | 30 | # The encoding of source files. 31 | # source_encoding = 'utf-8' 32 | 33 | # The master toctree document. 34 | master_doc = 'index' 35 | 36 | # General information about the project. 37 | project = u'ElastAlert' 38 | copyright = u'2014, Yelp' 39 | 40 | # The version info for the project you're documenting, acts as replacement for 41 | # |version| and |release|, also used in various other places throughout the 42 | # built documents. 43 | # 44 | # The short X.Y version. 45 | version = '0.0.1' 46 | # The full version, including alpha/beta/rc tags. 47 | release = '0.0.1' 48 | 49 | # The language for content autogenerated by Sphinx. Refer to documentation 50 | # for a list of supported languages. 51 | # language = None 52 | 53 | # There are two options for replacing |today|: either, you set today to some 54 | # non-false value, then it is used: 55 | # today = '' 56 | # Else, today_fmt is used as the format for a strftime call. 57 | # today_fmt = '%B %d, %Y' 58 | 59 | # List of documents that shouldn't be included in the build. 60 | # unused_docs = [] 61 | 62 | # List of directories, relative to source directory, that shouldn't be searched 63 | # for source files. 64 | exclude_trees = [] 65 | 66 | # The reST default role (used for this markup: `text`) to use for all documents. 67 | # default_role = None 68 | 69 | # If true, '()' will be appended to :func: etc. cross-reference text. 70 | # add_function_parentheses = True 71 | 72 | # If true, the current module name will be prepended to all description 73 | # unit titles (such as .. function::). 74 | # add_module_names = True 75 | 76 | # If true, sectionauthor and moduleauthor directives will be shown in the 77 | # output. They are ignored by default. 
78 | # show_authors = False 79 | 80 | # The name of the Pygments (syntax highlighting) style to use. 81 | pygments_style = 'sphinx' 82 | 83 | # A list of ignored prefixes for module index sorting. 84 | # modindex_common_prefix = [] 85 | 86 | 87 | # -- Options for HTML output --------------------------------------------------- 88 | 89 | # The theme to use for HTML and HTML Help pages. Major themes that come with 90 | # Sphinx are currently 'default' and 'sphinxdoc'. 91 | html_theme = 'sphinx_rtd_theme' 92 | 93 | # Theme options are theme-specific and customize the look and feel of a theme 94 | # further. For a list of options available for each theme, see the 95 | # documentation. 96 | # html_theme_options = {} 97 | 98 | # Add any paths that contain custom themes here, relative to this directory. 99 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 100 | # html_theme_path = [] 101 | 102 | # The name for this set of Sphinx documents. If None, it defaults to 103 | # "<project> v<release> documentation". 104 | # html_title = None 105 | 106 | # A shorter title for the navigation bar. Default is the same as html_title. 107 | # html_short_title = None 108 | 109 | # The name of an image file (relative to this directory) to place at the top 110 | # of the sidebar. 111 | # html_logo = None 112 | 113 | # The name of an image file (within the static path) to use as favicon of the 114 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 115 | # pixels large. 116 | # html_favicon = None 117 | 118 | # Add any paths that contain custom static files (such as style sheets) here, 119 | # relative to this directory. They are copied after the builtin static files, 120 | # so a file named "default.css" will overwrite the builtin "default.css". 121 | html_static_path = ['_static'] 122 | 123 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 124 | # using the given strftime format. 125 | # html_last_updated_fmt = '%b %d, %Y' 126 | 127 | # If true, SmartyPants will be used to convert quotes and dashes to 128 | # typographically correct entities. 129 | # html_use_smartypants = True 130 | 131 | # Custom sidebar templates, maps document names to template names. 132 | # html_sidebars = {} 133 | 134 | # Additional templates that should be rendered to pages, maps page names to 135 | # template names. 136 | # html_additional_pages = {} 137 | 138 | # If false, no module index is generated. 139 | # html_use_modindex = True 140 | 141 | # If false, no index is generated. 142 | # html_use_index = True 143 | 144 | # If true, the index is split into individual pages for each letter. 145 | # html_split_index = False 146 | 147 | # If true, links to the reST sources are added to the pages. 148 | # html_show_sourcelink = True 149 | 150 | # If true, an OpenSearch description file will be output, and all pages will 151 | # contain a <link> tag referring to it. The value of this option must be the 152 | # base URL from which the finished HTML is served. 153 | # html_use_opensearch = '' 154 | 155 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 156 | # html_file_suffix = '' 157 | 158 | # Output file base name for HTML help builder. 159 | htmlhelp_basename = 'elastalertdoc' 160 | 161 | 162 | # -- Options for LaTeX output -------------------------------------------------- 163 | 164 | # The paper size ('letter' or 'a4'). 165 | # latex_paper_size = 'letter' 166 | 167 | # The font size ('10pt', '11pt' or '12pt').
168 | # latex_font_size = '10pt' 169 | 170 | # Grouping the document tree into LaTeX files. List of tuples 171 | # (source start file, target name, title, author, documentclass [howto/manual]). 172 | latex_documents = [ 173 | ('index', 'elastalert.tex', u'ElastAlert Documentation', 174 | u'Quentin Long', 'manual'), 175 | ] 176 | 177 | # The name of an image file (relative to this directory) to place at the top of 178 | # the title page. 179 | # latex_logo = None 180 | 181 | # For "manual" documents, if this is true, then toplevel headings are parts, 182 | # not chapters. 183 | # latex_use_parts = False 184 | 185 | # Additional stuff for the LaTeX preamble. 186 | # latex_preamble = '' 187 | 188 | # Documents to append as an appendix to all manuals. 189 | # latex_appendices = [] 190 | 191 | # If false, no module index is generated. 192 | # latex_use_modindex = True 193 | -------------------------------------------------------------------------------- /docs/source/elastalert_status.rst: -------------------------------------------------------------------------------- 1 | .. _metadata: 2 | 3 | ElastAlert Metadata Index 4 | ========================= 5 | 6 | ElastAlert uses Elasticsearch to store various information about its state. This not only allows for some 7 | level of auditing and debugging of ElastAlert's operation, but also helps avoid loss of data or duplication of alerts 8 | when ElastAlert is shut down, restarted, or crashes. This cluster and index information is defined 9 | in the global config file with ``es_host``, ``es_port`` and ``writeback_index``. ElastAlert must be able 10 | to write to this index. The ``elastalert-create-index`` script will create the index with the correct mapping 11 | for you, and optionally copy the documents from an existing ElastAlert writeback index. Run it and it will 12 | prompt you for the cluster information. 13 | 14 | ElastAlert will create four different types of documents in the writeback index: 15 | 16 | elastalert_status 17 | ~~~~~~~~~~~~~~~~~ 18 | 19 | ``elastalert_status`` is a log of the queries performed for a given rule and contains: 20 | 21 | - ``@timestamp``: The time when the document was uploaded to Elasticsearch. This is after a query has been run and the results have been processed. 22 | - ``rule_name``: The name of the corresponding rule. 23 | - ``starttime``: The beginning of the timestamp range the query searched. 24 | - ``endtime``: The end of the timestamp range the query searched. 25 | - ``hits``: The number of results from the query. 26 | - ``matches``: The number of matches that the rule returned after processing the hits. Note that this does not necessarily mean that alerts were triggered. 27 | - ``time_taken``: The number of seconds it took for this query to run. 28 | 29 | ``elastalert_status`` is what ElastAlert will use to determine what time range to query when it first starts to avoid duplicating queries. 30 | For each rule, it will start querying from the most recent endtime. If ElastAlert is running in debug mode, it will still attempt to base 31 | its start time on the most recent search performed, but it will not write the results of any query back to Elasticsearch. 32 | 33 | elastalert 34 | ~~~~~~~~~~ 35 | 36 | ``elastalert`` is a log of information about every alert triggered and contains: 38 | 38 | - ``@timestamp``: The time when the document was uploaded to Elasticsearch. This is not the same as when the alert was sent, but rather when the rule outputs a match.
39 | - ``rule_name``: The name of the corresponding rule. 40 | - ``alert_info``: This contains the output of Alert.get_info, a function that alerts implement to give some relevant context to the alert type. This may contain alert_info.type, alert_info.recipient, or any number of other sub fields. 41 | - ``alert_sent``: A boolean value as to whether this alert was actually sent or not. It may be false in the case of an exception or if it is part of an aggregated alert. 42 | - ``alert_time``: The time that the alert was or will be sent. Usually, this is the same as @timestamp, but may be some time in the future, indicating when an aggregated alert will be sent. 43 | - ``match_body``: This is the contents of the match dictionary that is used to create the alert. The subfields may include a number of things containing information about the alert. 44 | - ``alert_exception``: This field is only present when the alert failed because of an exception occurring, and will contain the exception information. 45 | - ``aggregate_id``: This field is only present when the rule is configured to use aggregation. The first alert of the aggregation period will contain an alert_time set to the aggregation time into the future, and subsequent alerts will contain the document ID of the first. When the alert_time is reached, all alerts with that aggregate_id will be sent together. 46 | 47 | elastalert_error 48 | ~~~~~~~~~~~~~~~~ 49 | 50 | When an error occurs in ElastAlert, it is written to both Elasticsearch and to stderr. The ``elastalert_error`` type contains: 51 | 52 | - ``@timestamp``: The time when the error occurred. 53 | - ``message``: The error or exception message. 54 | - ``traceback``: The traceback from when the error occurred. 55 | - ``data``: Extra information about the error. This often contains the name of the rule which caused the error. 56 | 57 | silence 58 | ~~~~~~~ 59 | 60 | ``silence`` is a record of when alerts for a given rule will be suppressed, either because of a ``realert`` setting or from using --silence. When 61 | an alert with ``realert`` is triggered, a ``silence`` record will be written with ``until`` set to the alert time plus ``realert``. 62 | 63 | - ``@timestamp``: The time when the document was uploaded to Elasticsearch. 64 | - ``rule_name``: The name of the corresponding rule. 65 | - ``until``: The timestamp when alerts will begin being sent again. 66 | - ``exponent``: The exponential factor which multiplies ``realert``. The length of this silence is equal to ``realert`` * 2**exponent. This will 67 | be 0 unless ``exponential_realert`` is set. 68 | 69 | Whenever an alert is triggered, ElastAlert will check for a matching ``silence`` document, and if the ``until`` timestamp is in the future, it will ignore 70 | the alert completely. See the :ref:`Running ElastAlert <runningelastalert>` section for information on how to silence an alert. 71 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. ElastAlert documentation master file, created by 2 | sphinx-quickstart on Thu Jul 11 15:45:31 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ElastAlert - Easy & Flexible Alerting With Elasticsearch 7 | ======================================================== 8 | 9 | Contents: 10 | 11 | ..
toctree:: 12 | :maxdepth: 2 13 | 14 | elastalert 15 | running_elastalert 16 | ruletypes 17 | elastalert_status 18 | recipes/adding_rules 19 | recipes/adding_alerts 20 | recipes/writing_filters 21 | recipes/adding_enhancements 22 | recipes/adding_loaders 23 | recipes/signing_requests 24 | 25 | Indices and Tables 26 | ================== 27 | 28 | * :ref:`genindex` 29 | * :ref:`modindex` 30 | * :ref:`search` 31 | -------------------------------------------------------------------------------- /docs/source/recipes/adding_alerts.rst: -------------------------------------------------------------------------------- 1 | .. _writingalerts: 2 | 3 | Adding a New Alerter 4 | ==================== 5 | 6 | Alerters are subclasses of ``Alerter``, found in ``elastalert/alerts.py``. They are given matches 7 | and perform some action based on them. Your alerter needs to implement two member functions, and will look 8 | something like this: 9 | 10 | .. code-block:: python 11 | 12 | class AwesomeNewAlerter(Alerter): 13 | required_options = set(['some_config_option']) 14 | def alert(self, matches): 15 | ... 16 | def get_info(self): 17 | ... 18 | 19 | You can import alert types by specifying the type as ``module.file.AlertName``, where module is the name of a python module, 20 | and file is the name of the python file containing an ``Alerter`` subclass named ``AlertName``. 21 | 22 | Basics 23 | ------ 24 | 25 | The alerter class will be instantiated when ElastAlert starts, and be periodically passed 26 | matches through the ``alert`` method. ElastAlert also writes back info about the alert into 27 | Elasticsearch that it obtains through ``get_info``. Several important member properties: 28 | 29 | ``self.required_options``: This is a set containing names of configuration options that must be 30 | present. ElastAlert will not instantiate the alert if any are missing. 31 | 32 | ``self.rule``: The dictionary containing the rule configuration. All options specific to the alert 33 | should be in the rule configuration file and can be accessed here. 34 | 35 | ``self.pipeline``: This is a dictionary object that serves to transfer information between alerts. When an alert is triggered, 36 | a new empty pipeline object will be created and each alerter can add or receive information from it. Note that alerters 37 | are called in the order they are defined in the rule file. For example, the JIRA alerter will add its ticket number 38 | to the pipeline and the email alerter will include that link if it's present in the pipeline. 39 | 40 | alert(self, matches): 41 | --------------------- 42 | 43 | ElastAlert will call this function to send an alert. ``matches`` is a list of dictionary objects with 44 | information about the match. You can get a nice string representation of the match by calling 45 | ``self.rule['type'].get_match_str(match, self.rule)``. If this method raises an exception, it will 46 | be caught by ElastAlert and the alert will be marked as unsent and saved for later. 47 | 48 | get_info(self): 49 | --------------- 50 | 51 | This function is called to get information about the alert to save back to Elasticsearch. It should 52 | return a dictionary, which is uploaded directly to Elasticsearch, and should contain useful information 53 | about the alert such as the type, recipients, parameters, etc. 54 | 55 | Tutorial 56 | -------- 57 | 58 | Let's create a new alert that will write alerts to a local output file. First, 59 | create a modules folder in the base ElastAlert folder: 60 | 61 | ..
    $ mkdir elastalert_modules
    $ cd elastalert_modules
    $ touch __init__.py

Now, in a file named ``my_alerts.py``, add

.. code-block:: python

    from elastalert.alerts import Alerter, BasicMatchString

    class AwesomeNewAlerter(Alerter):

        # By setting required_options to a set of strings
        # you can ensure that the rule config file specifies all
        # of the options. Otherwise, ElastAlert will throw an exception
        # when trying to load the rule.
        required_options = set(['output_file_path'])

        # alert() is called with every batch of matches
        def alert(self, matches):

            # matches is a list of match dictionaries.
            # It contains more than one match when the alert has
            # the aggregation option set
            for match in matches:

                # Config options can be accessed with self.rule
                with open(self.rule['output_file_path'], "a") as output_file:

                    # BasicMatchString will transform the match into the default
                    # human readable string format
                    match_string = str(BasicMatchString(self.rule, match))

                    output_file.write(match_string)

        # get_info is called after an alert is sent to get data that is written back
        # to Elasticsearch in the field "alert_info"
        # It should return a dict of information relevant to what the alert does
        def get_info(self):
            return {'type': 'Awesome Alerter',
                    'output_file': self.rule['output_file_path']}


In the rule configuration file, we are going to specify the alert by writing

.. code-block:: yaml

    alert: "elastalert_modules.my_alerts.AwesomeNewAlerter"
    output_file_path: "/tmp/alerts.log"

ElastAlert will attempt to import the alert with ``from elastalert_modules.my_alerts import AwesomeNewAlerter``.
This means that the folder must be in a location where it can be imported as a Python module.
--------------------------------------------------------------------------------
/docs/source/recipes/adding_enhancements.rst:
--------------------------------------------------------------------------------
.. _enhancements:

Enhancements
============

Enhancements are modules which let you modify a match before an alert is sent. They should subclass ``BaseEnhancement``, found in ``elastalert/enhancements.py``.
They can be added to rules using the ``match_enhancements`` option::

    match_enhancements:
    - module.file.MyEnhancement

where module is the name of a Python module, or folder containing ``__init__.py``,
and file is the name of the Python file containing a ``BaseEnhancement`` subclass named ``MyEnhancement``.

A special exception class ``DropMatchException`` can be used in enhancements to drop matches if custom conditions are met. For example:

.. code-block:: python

    from elastalert.enhancements import BaseEnhancement, DropMatchException

    class MyEnhancement(BaseEnhancement):
        def process(self, match):
            # Drops a match if "field_1" == "field_2"
            if match['field_1'] == match['field_2']:
                raise DropMatchException()

Example
-------

As an example enhancement, let's add a link to a whois website. The match must contain a field named ``domain``, and the enhancement will
add an entry named ``domain_whois_link``. First, create a modules folder for the enhancement in the ElastAlert directory.

..
code-block:: console 32 | 33 | $ mkdir elastalert_modules 34 | $ cd elastalert_modules 35 | $ touch __init__.py 36 | 37 | Now, in a file named ``my_enhancements.py``, add 38 | 39 | 40 | .. code-block:: python 41 | 42 | from elastalert.enhancements import BaseEnhancement 43 | 44 | class MyEnhancement(BaseEnhancement): 45 | 46 | # The enhancement is run against every match 47 | # The match is passed to the process function where it can be modified in any way 48 | # ElastAlert will do this for each enhancement linked to a rule 49 | def process(self, match): 50 | if 'domain' in match: 51 | url = "http://who.is/whois/%s" % (match['domain']) 52 | match['domain_whois_link'] = url 53 | 54 | Enhancements will not automatically be run. Inside the rule configuration file, you need to point it to the enhancement(s) that it should run 55 | by setting the ``match_enhancements`` option:: 56 | 57 | match_enhancements: 58 | - "elastalert_modules.my_enhancements.MyEnhancement" 59 | 60 | -------------------------------------------------------------------------------- /docs/source/recipes/adding_loaders.rst: -------------------------------------------------------------------------------- 1 | .. _loaders: 2 | 3 | Rules Loaders 4 | ======================== 5 | 6 | RulesLoaders are subclasses of ``RulesLoader``, found in ``elastalert/loaders.py``. They are used to 7 | gather rules for a particular source. Your RulesLoader needs to implement three member functions, and 8 | will look something like this: 9 | 10 | .. code-block:: python 11 | 12 | class AwesomeNewRulesLoader(RulesLoader): 13 | def get_names(self, conf, use_rule=None): 14 | ... 15 | def get_hashes(self, conf, use_rule=None): 16 | ... 17 | def get_yaml(self, rule): 18 | ... 19 | 20 | You can import loaders by specifying the type as ``module.file.RulesLoaderName``, where module is the name of a 21 | python module, and file is the name of the python file containing a ``RulesLoader`` subclass named ``RulesLoaderName``. 22 | 23 | Example 24 | ------- 25 | 26 | As an example loader, let's retrieve rules from a database rather than from the local file system. First, create a 27 | modules folder for the loader in the ElastAlert directory. 28 | 29 | .. code-block:: console 30 | 31 | $ mkdir elastalert_modules 32 | $ cd elastalert_modules 33 | $ touch __init__.py 34 | 35 | Now, in a file named ``mongo_loader.py``, add 36 | 37 | .. 
code-block:: python 38 | 39 | from pymongo import MongoClient 40 | from elastalert.loaders import RulesLoader 41 | import yaml 42 | 43 | class MongoRulesLoader(RulesLoader): 44 | def __init__(self, conf): 45 | super(MongoRulesLoader, self).__init__(conf) 46 | self.client = MongoClient(conf['mongo_url']) 47 | self.db = self.client[conf['mongo_db']] 48 | self.cache = {} 49 | 50 | def get_names(self, conf, use_rule=None): 51 | if use_rule: 52 | return [use_rule] 53 | 54 | rules = [] 55 | self.cache = {} 56 | for rule in self.db.rules.find(): 57 | self.cache[rule['name']] = yaml.load(rule['yaml']) 58 | rules.append(rule['name']) 59 | 60 | return rules 61 | 62 | def get_hashes(self, conf, use_rule=None): 63 | if use_rule: 64 | return [use_rule] 65 | 66 | hashes = {} 67 | self.cache = {} 68 | for rule in self.db.rules.find(): 69 | self.cache[rule['name']] = rule['yaml'] 70 | hashes[rule['name']] = rule['hash'] 71 | 72 | return hashes 73 | 74 | def get_yaml(self, rule): 75 | if rule in self.cache: 76 | return self.cache[rule] 77 | 78 | self.cache[rule] = yaml.load(self.db.rules.find_one({'name': rule})['yaml']) 79 | return self.cache[rule] 80 | 81 | Finally, you need to specify in your ElastAlert configuration file that MongoRulesLoader should be used instead of the 82 | default FileRulesLoader, so in your ``elastalert.conf`` file:: 83 | 84 | rules_loader: "elastalert_modules.mongo_loader.MongoRulesLoader" 85 | 86 | -------------------------------------------------------------------------------- /docs/source/recipes/adding_rules.rst: -------------------------------------------------------------------------------- 1 | .. _writingrules: 2 | 3 | Adding a New Rule Type 4 | ====================== 5 | 6 | This document describes how to create a new rule type. Built in rule types live in ``elastalert/ruletypes.py`` 7 | and are subclasses of ``RuleType``. At the minimum, your rule needs to implement ``add_data``. 8 | 9 | Your class may implement several functions from ``RuleType``: 10 | 11 | .. code-block:: python 12 | 13 | class AwesomeNewRule(RuleType): 14 | # ... 15 | def add_data(self, data): 16 | # ... 17 | def get_match_str(self, match): 18 | # ... 19 | def garbage_collect(self, timestamp): 20 | # ... 21 | 22 | You can import new rule types by specifying the type as ``module.file.RuleName``, where module is the name of a Python module, or folder 23 | containing ``__init__.py``, and file is the name of the Python file containing a ``RuleType`` subclass named ``RuleName``. 24 | 25 | Basics 26 | ------ 27 | 28 | The ``RuleType`` instance remains in memory while ElastAlert is running, receives data, keeps track of its state, 29 | and generates matches. Several important member properties are created in the ``__init__`` method of ``RuleType``: 30 | 31 | ``self.rules``: This dictionary is loaded from the rule configuration file. If there is a ``timeframe`` configuration 32 | option, this will be automatically converted to a ``datetime.timedelta`` object when the rules are loaded. 33 | 34 | ``self.matches``: This is where ElastAlert checks for matches from the rule. Whatever information is relevant to the match 35 | (generally coming from the fields in Elasticsearch) should be put into a dictionary object and 36 | added to ``self.matches``. ElastAlert will pop items out periodically and send alerts based on these objects. It is 37 | recommended that you use ``self.add_match(match)`` to add matches. 
In addition to appending to ``self.matches``, 38 | ``self.add_match`` will convert the datetime ``@timestamp`` back into an ISO8601 timestamp. 39 | 40 | ``self.required_options``: This is a set of options that must exist in the configuration file. ElastAlert will 41 | ensure that all of these fields exist before trying to instantiate a ``RuleType`` instance. 42 | 43 | add_data(self, data): 44 | --------------------- 45 | 46 | When ElastAlert queries Elasticsearch, it will pass all of the hits to the rule type by calling ``add_data``. 47 | ``data`` is a list of dictionary objects which contain all of the fields in ``include``, ``query_key`` and ``compare_key`` 48 | if they exist, and ``@timestamp`` as a datetime object. They will always come in chronological order sorted by '@timestamp'. 49 | 50 | get_match_str(self, match): 51 | --------------------------- 52 | 53 | Alerts will call this function to get a human readable string about a match for an alert. Match will be the same 54 | object that was added to ``self.matches``, and ``rules`` the same as ``self.rules``. The ``RuleType`` base implementation 55 | will return an empty string. Note that by default, the alert text will already contain the key-value pairs from the match. This 56 | should return a string that gives some information about the match in the context of this specific RuleType. 57 | 58 | garbage_collect(self, timestamp): 59 | --------------------------------- 60 | 61 | This will be called after ElastAlert has run over a time period ending in ``timestamp`` and should be used 62 | to clear any state that may be obsolete as of ``timestamp``. ``timestamp`` is a datetime object. 63 | 64 | 65 | Tutorial 66 | -------- 67 | 68 | As an example, we are going to create a rule type for detecting suspicious logins. Let's imagine the data we are querying is login 69 | events that contains IP address, username and a timestamp. Our configuration will take a list of usernames and a time range 70 | and alert if a login occurs in the time range. First, let's create a modules folder in the base ElastAlert folder: 71 | 72 | .. code-block:: console 73 | 74 | $ mkdir elastalert_modules 75 | $ cd elastalert_modules 76 | $ touch __init__.py 77 | 78 | Now, in a file named ``my_rules.py``, add 79 | 80 | .. code-block:: python 81 | 82 | import dateutil.parser 83 | 84 | from elastalert.ruletypes import RuleType 85 | 86 | # elastalert.util includes useful utility functions 87 | # such as converting from timestamp to datetime obj 88 | from elastalert.util import ts_to_dt 89 | 90 | class AwesomeRule(RuleType): 91 | 92 | # By setting required_options to a set of strings 93 | # You can ensure that the rule config file specifies all 94 | # of the options. Otherwise, ElastAlert will throw an exception 95 | # when trying to load the rule. 96 | required_options = set(['time_start', 'time_end', 'usernames']) 97 | 98 | # add_data will be called each time Elasticsearch is queried. 
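    # (note: add_data is not called at all when a query returns no hits; see garbage_collect below)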
    # data is a list of documents from Elasticsearch, sorted by timestamp,
    # including all the fields that the config specifies with "include"
    def add_data(self, data):
        for document in data:

            # To access config options, use self.rules
            if document['username'] in self.rules['usernames']:

                # Convert the timestamp to a time object
                login_time = document['@timestamp'].time()

                # Convert time_start and time_end to time objects
                time_start = dateutil.parser.parse(self.rules['time_start']).time()
                time_end = dateutil.parser.parse(self.rules['time_end']).time()

                # If the time falls between start and end
                if login_time > time_start and login_time < time_end:

                    # To add a match, use self.add_match
                    self.add_match(document)

    # The results of get_match_str will appear in the alert text
    def get_match_str(self, match):
        return "%s logged in between %s and %s" % (match['username'],
                                                   self.rules['time_start'],
                                                   self.rules['time_end'])

    # garbage_collect is called to indicate that ElastAlert has already been run up to timestamp
    # It is useful for knowing that there were no query results from Elasticsearch, because
    # add_data will not be called with an empty list
    def garbage_collect(self, timestamp):
        pass


In the rule configuration file, ``example_rules/example_login_rule.yaml``, we are going to specify this rule by writing

.. code-block:: yaml

    name: "Example login rule"
    es_host: elasticsearch.example.com
    es_port: 14900
    type: "elastalert_modules.my_rules.AwesomeRule"
    # Alert if admin, userXYZ or foobaz log in between 8 PM and midnight
    time_start: "20:00"
    time_end: "24:00"
    usernames:
    - "admin"
    - "userXYZ"
    - "foobaz"
    # We require the username field from documents
    include:
    - "username"
    alert:
    - debug

ElastAlert will attempt to import the rule with ``from elastalert_modules.my_rules import AwesomeRule``.
This means that the folder must be in a location where it can be imported as a Python module.

An alert from this rule will look something like::

    Example login rule

    userXYZ logged in between 20:00 and 24:00

    @timestamp: 2015-03-02T22:23:24Z
    username: userXYZ
--------------------------------------------------------------------------------
/docs/source/recipes/signing_requests.rst:
--------------------------------------------------------------------------------
.. _signingrequests:

Signing requests to Amazon Elasticsearch service
================================================

When using the Amazon Elasticsearch service, you need to secure your Elasticsearch cluster
from the outside. Currently, there is no way to secure it using
network firewall rules, so the only option is to sign the requests using the
access key and secret key for a role or user with permissions on the
Elasticsearch service.

You can sign requests to AWS using any of the standard AWS methods of providing
credentials.
14 | - Environment Variables, ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` 15 | - AWS Config or Credential Files, ``~/.aws/config`` and ``~/.aws/credentials`` 16 | - AWS Instance Profiles, uses the EC2 Metadata service 17 | 18 | Using an Instance Profile 19 | ------------------------- 20 | 21 | Typically, you'll deploy ElastAlert on a running EC2 instance on AWS. You can 22 | assign a role to this instance that gives it permissions to read from and write 23 | to the Elasticsearch service. When using an Instance Profile, you will need to 24 | specify the ``aws_region`` in the configuration file or set the 25 | ``AWS_DEFAULT_REGION`` environment variable. 26 | 27 | Using AWS profiles 28 | ------------------ 29 | 30 | You can also create a user with permissions on the Elasticsearch service and 31 | tell ElastAlert to authenticate itself using that user. First, create an AWS 32 | profile in the machine where you'd like to run ElastAlert for the user with 33 | permissions. 34 | 35 | You can use the environment variables ``AWS_DEFAULT_PROFILE`` and 36 | ``AWS_DEFAULT_REGION`` or add two options to the configuration file: 37 | - ``aws_region``: The AWS region where you want to operate. 38 | - ``profile``: The name of the AWS profile to use to sign the requests. 39 | -------------------------------------------------------------------------------- /docs/source/recipes/writing_filters.rst: -------------------------------------------------------------------------------- 1 | .. _writingfilters: 2 | 3 | Writing Filters For Rules 4 | ========================= 5 | 6 | This document describes how to create a filter section for your rule config file. 7 | 8 | The filters used in rules are part of the Elasticsearch query DSL, further documentation for which can be found at 9 | https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html 10 | This document contains a small subset of particularly useful filters. 11 | 12 | The filter section is passed to Elasticsearch exactly as follows:: 13 | 14 | filter: 15 | and: 16 | filters: 17 | - [filters from rule.yaml] 18 | 19 | Every result that matches these filters will be passed to the rule for processing. 20 | 21 | Common Filter Types: 22 | -------------------- 23 | 24 | query_string 25 | ************ 26 | 27 | The query_string type follows the Lucene query format and can be used for partial or full matches to multiple fields. 28 | See http://lucene.apache.org/core/2_9_4/queryparsersyntax.html for more information:: 29 | 30 | filter: 31 | - query: 32 | query_string: 33 | query: "username: bob" 34 | - query: 35 | query_string: 36 | query: "_type: login_logs" 37 | - query: 38 | query_string: 39 | query: "field: value OR otherfield: othervalue" 40 | - query: 41 | query_string: 42 | query: "this: that AND these: those" 43 | 44 | term 45 | **** 46 | 47 | The term type allows for exact field matches:: 48 | 49 | filter: 50 | - term: 51 | name_field: "bob" 52 | - term: 53 | _type: "login_logs" 54 | 55 | Note that a term query may not behave as expected if a field is analyzed. By default, many string fields will be tokenized by whitespace, and a term query for "foo bar" may not match 56 | a field that appears to have the value "foo bar", unless it is not analyzed. Conversely, a term query for "foo" will match analyzed strings "foo bar" and "foo baz". For full text 57 | matching on analyzed fields, use query_string. 
See https://www.elastic.co/guide/en/elasticsearch/guide/current/term-vs-full-text.html

`terms `_
*****************************************************************************************************

The terms filter allows for easy combination of multiple term filters::

    filter:
    - terms:
        field: ["value1", "value2"] # value1 OR value2

You can also match on multiple fields::

    - terms:
        fieldX: ["value1", "value2"]
        fieldY: ["something", "something_else"]
        fieldZ: ["foo", "bar", "baz"]

wildcard
********

For wildcard matches::

    filter:
    - query:
        wildcard:
          field: "foo*bar"

range
*****

For ranges on fields::

    filter:
    - range:
        status_code:
          from: 500
          to: 599

Negation, and, or
*****************

For Elasticsearch 2.X, any of the filters can be embedded in ``not``, ``and``, and ``or``::

    filter:
    - or:
        - term:
            field: "value"
        - wildcard:
            field: "foo*bar"
        - and:
            - not:
                term:
                  field: "value"
            - not:
                term:
                  _type: "something"

For Elasticsearch 5.x, this will not work; to implement boolean logic, use query strings::

    filter:
    - query:
        query_string:
          query: "somefield: somevalue OR foo: bar"


Loading Filters Directly From Kibana 3
--------------------------------------

There are two ways to load filters directly from a Kibana 3 dashboard. You can set your filter to::

    filter:
      download_dashboard: "My Dashboard Name"

and when ElastAlert starts, it will download the dashboard schema from Elasticsearch and use the filters from that.
However, if the dashboard name changes or if there are connectivity problems when ElastAlert starts, the rule will not load and
ElastAlert will exit with an error like "Could not download filters for .."

The second way is to generate a config file once using the Kibana dashboard. To do this, run ``elastalert-rule-from-kibana``.

.. code-block:: console

    $ elastalert-rule-from-kibana
    Elasticsearch host: elasticsearch.example.com
    Elasticsearch port: 14900
    Dashboard name: My Dashboard

    Partial Config file
    -----------

    name: My Dashboard
    es_host: elasticsearch.example.com
    es_port: 14900
    filter:
    - query:
        query_string: {query: '_exists_:log.message'}
    - query:
        query_string: {query: 'some_field:12345'}
--------------------------------------------------------------------------------
/docs/source/running_elastalert.rst:
--------------------------------------------------------------------------------
..
_tutorial: 2 | 3 | Running ElastAlert for the First Time 4 | ===================================== 5 | 6 | Requirements 7 | ------------ 8 | 9 | - Elasticsearch 10 | - ISO8601 or Unix timestamped data 11 | - Python 3.6 12 | - pip, see requirements.txt 13 | - Packages on Ubuntu 14.x: python-pip python-dev libffi-dev libssl-dev 14 | 15 | Downloading and Configuring 16 | --------------------------- 17 | 18 | You can either install the latest released version of ElastAlert using pip:: 19 | 20 | $ pip install elastalert 21 | 22 | or you can clone the ElastAlert repository for the most recent changes:: 23 | 24 | $ git clone https://github.com/Yelp/elastalert.git 25 | 26 | Install the module:: 27 | 28 | $ pip install "setuptools>=11.3" 29 | $ python setup.py install 30 | 31 | Depending on the version of Elasticsearch, you may need to manually install the correct version of elasticsearch-py. 32 | 33 | Elasticsearch 5.0+:: 34 | 35 | $ pip install "elasticsearch>=5.0.0" 36 | 37 | Elasticsearch 2.X:: 38 | 39 | $ pip install "elasticsearch<3.0.0" 40 | 41 | Next, open up config.yaml.example. In it, you will find several configuration options. ElastAlert may be run without changing any of these settings. 42 | 43 | ``rules_folder`` is where ElastAlert will load rule configuration files from. It will attempt to load every .yaml file in the folder. Without any valid rules, ElastAlert will not start. ElastAlert will also load new rules, stop running missing rules, and restart modified rules as the files in this folder change. For this tutorial, we will use the example_rules folder. 44 | 45 | ``run_every`` is how often ElastAlert will query Elasticsearch. 46 | 47 | ``buffer_time`` is the size of the query window, stretching backwards from the time each query is run. This value is ignored for rules where ``use_count_query`` or ``use_terms_query`` is set to true. 48 | 49 | ``es_host`` is the address of an Elasticsearch cluster where ElastAlert will store data about its state, queries run, alerts, and errors. Each rule may also use a different Elasticsearch host to query against. 50 | 51 | ``es_port`` is the port corresponding to ``es_host``. 52 | 53 | ``use_ssl``: Optional; whether or not to connect to ``es_host`` using TLS; set to ``True`` or ``False``. 54 | 55 | ``verify_certs``: Optional; whether or not to verify TLS certificates; set to ``True`` or ``False``. The default is ``True`` 56 | 57 | ``client_cert``: Optional; path to a PEM certificate to use as the client certificate 58 | 59 | ``client_key``: Optional; path to a private key file to use as the client key 60 | 61 | ``ca_certs``: Optional; path to a CA cert bundle to use to verify SSL connections 62 | 63 | ``es_username``: Optional; basic-auth username for connecting to ``es_host``. 64 | 65 | ``es_password``: Optional; basic-auth password for connecting to ``es_host``. 66 | 67 | ``es_url_prefix``: Optional; URL prefix for the Elasticsearch endpoint. 68 | 69 | ``es_send_get_body_as``: Optional; Method for querying Elasticsearch - ``GET``, ``POST`` or ``source``. The default is ``GET`` 70 | 71 | ``writeback_index`` is the name of the index in which ElastAlert will store data. We will create this index later. 72 | 73 | ``alert_time_limit`` is the retry window for failed alerts. 74 | 75 | Save the file as ``config.yaml`` 76 | 77 | Setting Up Elasticsearch 78 | ------------------------ 79 | 80 | ElastAlert saves information and metadata about its queries and its alerts back to Elasticsearch. 
This is useful for auditing and debugging, and it allows ElastAlert to restart and resume exactly where it left off. This is not required for ElastAlert to run, but it is highly recommended.

First, we need to create an index for ElastAlert to write to by running ``elastalert-create-index`` and following the instructions::

    $ elastalert-create-index
    New index name (Default elastalert_status)
    Name of existing index to copy (Default None)
    New index elastalert_status created
    Done!

For information about what data will go here, see :ref:`ElastAlert Metadata Index `.

Creating a Rule
---------------

Each rule defines a query to perform, parameters on what triggers a match, and a list of alerts to fire for each match. We are going to use ``example_rules/example_frequency.yaml`` as a template::

    # From example_rules/example_frequency.yaml
    es_host: elasticsearch.example.com
    es_port: 14900
    name: Example rule
    type: frequency
    index: logstash-*
    num_events: 50
    timeframe:
      hours: 4
    filter:
    - term:
        some_field: "some_value"
    alert:
    - "email"
    email:
    - "elastalert@example.com"

``es_host`` and ``es_port`` should point to the Elasticsearch cluster we want to query.

``name`` is the unique name for this rule. ElastAlert will not start if two rules share the same name.

``type``: Each rule has a different type which may take different parameters. The ``frequency`` type means "Alert when more than ``num_events`` occur within ``timeframe``." For information about other types, see :ref:`Rule types `.

``index``: The name of the index(es) to query. If you are using Logstash, by default the indexes will match ``"logstash-*"``.

``num_events``: This parameter is specific to the ``frequency`` type and is the threshold for when an alert is triggered.

``timeframe`` is the time period in which ``num_events`` must occur.

``filter`` is a list of Elasticsearch filters that are used to filter results. Here we have a single term filter for documents with ``some_field`` matching ``some_value``. See :ref:`Writing Filters For Rules <writingfilters>` for more information. If no filters are desired, it should be specified as an empty list: ``filter: []``

``alert`` is a list of alerts to run on each match. For more information on alert types, see :ref:`Alerts `. The email alert requires an SMTP server for sending mail. By default, it will attempt to use localhost. This can be changed with the ``smtp_host`` option.

``email`` is a list of addresses to which alerts will be sent.

There are many other optional configuration options; see :ref:`Common configuration options `.

All documents must have a timestamp field. ElastAlert will try to use ``@timestamp`` by default, but this can be changed with the ``timestamp_field`` option. By default, ElastAlert uses ISO8601 timestamps, though unix timestamps are supported by setting ``timestamp_type``.

As is, this rule means "Send an email to elastalert@example.com when there are more than 50 documents with ``some_field == some_value`` within a 4 hour period."
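For example, building on the timestamp options above: if your documents kept their event time as a unix timestamp in a custom field, the rule could override the defaults as in this sketch (``event_ts`` is a hypothetical field name; ``timestamp_field`` and ``timestamp_type`` are standard options):

.. code-block:: yaml

    # Hypothetical: read unix-epoch timestamps from "event_ts"
    timestamp_field: "event_ts"
    timestamp_type: "unix"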
Testing Your Rule
-----------------

Running the ``elastalert-test-rule`` tool will check that your rule configuration loads successfully and will run it in debug mode over the last 24 hours::

    $ elastalert-test-rule example_rules/example_frequency.yaml

If you want to specify a configuration file to use, you can run it with the config flag::

    $ elastalert-test-rule --config example_rules/example_frequency.yaml

The configuration preferences will be loaded as follows:

1. Configurations specified in the yaml file.
2. Configurations specified in the config file, if specified.
3. Default configurations, for the tool to run.

See :ref:`the testing section for more details `.

Running ElastAlert
------------------

There are two ways of invoking ElastAlert: as a daemon through Supervisor (http://supervisord.org/), or directly with Python. For easier debugging in this tutorial, we will invoke it directly::

    $ python -m elastalert.elastalert --verbose --rule example_frequency.yaml  # or use the entry point: elastalert --verbose --rule ...
    No handlers could be found for logger "Elasticsearch"
    INFO:root:Queried rule Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 hits
    INFO:Elasticsearch:POST http://elasticsearch.example.com:14900/elastalert_status/elastalert_status?op_type=create [status:201 request:0.025s]
    INFO:root:Ran Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 query hits (0 already seen), 0 matches, 0 alerts sent
    INFO:root:Sleeping for 297 seconds

ElastAlert uses the Python logging system, and ``--verbose`` sets it to display INFO level messages. ``--rule example_frequency.yaml`` specifies the rule to run; otherwise, ElastAlert will attempt to load the other rules in the example_rules folder.

Let's break down the output to see what's happening.

``Queried rule Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 hits``

ElastAlert periodically queries the most recent ``buffer_time`` (default 45 minutes) for data matching the filters. Here we see that it matched 5 hits.

``POST http://elasticsearch.example.com:14900/elastalert_status/elastalert_status?op_type=create [status:201 request:0.025s]``

This line shows that ElastAlert uploaded a document to the elastalert_status index with information about the query it just made.

``Ran Example rule from 1-15 14:22 PST to 1-15 15:07 PST: 5 query hits (0 already seen), 0 matches, 0 alerts sent``

This line means ElastAlert has finished processing the rule. For large time periods, sometimes multiple queries may be run, but their data will be processed together. ``query hits`` is the number of documents that were downloaded from Elasticsearch, ``already seen`` refers to documents that were already counted in a previous overlapping query and will be ignored, ``matches`` is the number of matches the rule type produced, and ``alerts sent`` is the number of alerts actually sent. This may differ from ``matches`` because of options like ``realert`` and ``aggregation`` or because of an error.

``Sleeping for 297 seconds``

The default ``run_every`` is 5 minutes, meaning ElastAlert will sleep until 5 minutes have elapsed from the last cycle before running queries for each rule again with time ranges shifted forward 5 minutes.
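Both intervals come from the global configuration. As a sketch, the defaults described above correspond to the following ``config.yaml`` entries, with time values written as nested units:

.. code-block:: yaml

    # Query every 5 minutes over a 45 minute window
    run_every:
      minutes: 5
    buffer_time:
      minutes: 45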
Say, over the next 297 seconds, 46 more matching documents were added to Elasticsearch::

    INFO:root:Queried rule Example rule from 1-15 14:27 PST to 1-15 15:12 PST: 51 hits
    ...
    INFO:root:Sent email to ['elastalert@example.com']
    ...
    INFO:root:Ran Example rule from 1-15 14:27 PST to 1-15 15:12 PST: 51 query hits, 1 matches, 1 alerts sent

The body of the email will contain something like::

    Example rule

    At least 50 events occurred between 1-15 11:12 PST and 1-15 15:12 PST

    @timestamp: 2015-01-15T15:12:00-08:00

If an error occurred, such as an unreachable SMTP server, you may see:

``ERROR:root:Error while running alert email: Error connecting to SMTP host: [Errno 61] Connection refused``

Note that if you stop ElastAlert and then run it again later, it will look up ``elastalert_status`` and begin querying
at the end time of the last query. This is to prevent duplication or skipping of alerts if ElastAlert is restarted.

By using the ``--debug`` flag instead of ``--verbose``, the body of the email will instead be logged and the email will not be sent. In addition, the queries will not be saved to ``elastalert_status``.
--------------------------------------------------------------------------------
/elastalert/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import copy
import time

from elasticsearch import Elasticsearch
from elasticsearch import RequestsHttpConnection
from elasticsearch.client import _make_path
from elasticsearch.client import query_params
from elasticsearch.exceptions import TransportError


class ElasticSearchClient(Elasticsearch):
    """ Extension of the low level :class:`Elasticsearch` client with additional version resolving features """

    def __init__(self, conf):
        """
        :arg conf: es_conn_config dictionary. Ref. :func:`~util.build_es_conn_config`
        """
        super(ElasticSearchClient, self).__init__(host=conf['es_host'],
                                                  port=conf['es_port'],
                                                  url_prefix=conf['es_url_prefix'],
                                                  use_ssl=conf['use_ssl'],
                                                  verify_certs=conf['verify_certs'],
                                                  ca_certs=conf['ca_certs'],
                                                  connection_class=RequestsHttpConnection,
                                                  http_auth=conf['http_auth'],
                                                  timeout=conf['es_conn_timeout'],
                                                  send_get_body_as=conf['send_get_body_as'],
                                                  client_cert=conf['client_cert'],
                                                  client_key=conf['client_key'])
        self._conf = copy.copy(conf)
        self._es_version = None

    @property
    def conf(self):
        """
        Returns the provided es_conn_config used when initializing the class instance.
        """
        return self._conf

    @property
    def es_version(self):
        """
        Returns the reported version from the Elasticsearch server.
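        The value is cached; the underlying ``info()`` call is attempted up to three times, sleeping between retries, before a ``TransportError`` is re-raised.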
45 | """ 46 | if self._es_version is None: 47 | for retry in range(3): 48 | try: 49 | self._es_version = self.info()['version']['number'] 50 | break 51 | except TransportError: 52 | if retry == 2: 53 | raise 54 | time.sleep(3) 55 | return self._es_version 56 | 57 | def is_atleastfive(self): 58 | """ 59 | Returns True when the Elasticsearch server version >= 5 60 | """ 61 | return int(self.es_version.split(".")[0]) >= 5 62 | 63 | def is_atleastsix(self): 64 | """ 65 | Returns True when the Elasticsearch server version >= 6 66 | """ 67 | return int(self.es_version.split(".")[0]) >= 6 68 | 69 | def is_atleastsixtwo(self): 70 | """ 71 | Returns True when the Elasticsearch server version >= 6.2 72 | """ 73 | major, minor = list(map(int, self.es_version.split(".")[:2])) 74 | return major > 6 or (major == 6 and minor >= 2) 75 | 76 | def is_atleastsixsix(self): 77 | """ 78 | Returns True when the Elasticsearch server version >= 6.6 79 | """ 80 | major, minor = list(map(int, self.es_version.split(".")[:2])) 81 | return major > 6 or (major == 6 and minor >= 6) 82 | 83 | def is_atleastseven(self): 84 | """ 85 | Returns True when the Elasticsearch server version >= 7 86 | """ 87 | return int(self.es_version.split(".")[0]) >= 7 88 | 89 | def resolve_writeback_index(self, writeback_index, doc_type): 90 | """ In ES6, you cannot have multiple _types per index, 91 | therefore we use self.writeback_index as the prefix for the actual 92 | index name, based on doc_type. """ 93 | if not self.is_atleastsix(): 94 | return writeback_index 95 | elif doc_type == 'silence': 96 | return writeback_index + '_silence' 97 | elif doc_type == 'past_elastalert': 98 | return writeback_index + '_past' 99 | elif doc_type == 'elastalert_status': 100 | return writeback_index + '_status' 101 | elif doc_type == 'elastalert_error': 102 | return writeback_index + '_error' 103 | return writeback_index 104 | 105 | @query_params( 106 | "_source", 107 | "_source_exclude", 108 | "_source_excludes", 109 | "_source_include", 110 | "_source_includes", 111 | "allow_no_indices", 112 | "allow_partial_search_results", 113 | "analyze_wildcard", 114 | "analyzer", 115 | "batched_reduce_size", 116 | "default_operator", 117 | "df", 118 | "docvalue_fields", 119 | "expand_wildcards", 120 | "explain", 121 | "from_", 122 | "ignore_unavailable", 123 | "lenient", 124 | "max_concurrent_shard_requests", 125 | "pre_filter_shard_size", 126 | "preference", 127 | "q", 128 | "rest_total_hits_as_int", 129 | "request_cache", 130 | "routing", 131 | "scroll", 132 | "search_type", 133 | "seq_no_primary_term", 134 | "size", 135 | "sort", 136 | "stats", 137 | "stored_fields", 138 | "suggest_field", 139 | "suggest_mode", 140 | "suggest_size", 141 | "suggest_text", 142 | "terminate_after", 143 | "timeout", 144 | "track_scores", 145 | "track_total_hits", 146 | "typed_keys", 147 | "version", 148 | ) 149 | def deprecated_search(self, index=None, doc_type=None, body=None, params=None): 150 | """ 151 | Execute a search query and get back search hits that match the query. 
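        Unwraps list/tuple transport responses to their payload element (see the end of this method).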
152 | ``_ 153 | :arg index: A list of index names to search, or a string containing a 154 | comma-separated list of index names to search; use `_all` 155 | or empty string to perform the operation on all indices 156 | :arg doc_type: A comma-separated list of document types to search; leave 157 | empty to perform the operation on all types 158 | :arg body: The search definition using the Query DSL 159 | :arg _source: True or false to return the _source field or not, or a 160 | list of fields to return 161 | :arg _source_exclude: A list of fields to exclude from the returned 162 | _source field 163 | :arg _source_include: A list of fields to extract and return from the 164 | _source field 165 | :arg allow_no_indices: Whether to ignore if a wildcard indices 166 | expression resolves into no concrete indices. (This includes `_all` 167 | string or when no indices have been specified) 168 | :arg allow_partial_search_results: Set to false to return an overall 169 | failure if the request would produce partial results. Defaults to 170 | True, which will allow partial results in the case of timeouts or 171 | partial failures 172 | :arg analyze_wildcard: Specify whether wildcard and prefix queries 173 | should be analyzed (default: false) 174 | :arg analyzer: The analyzer to use for the query string 175 | :arg batched_reduce_size: The number of shard results that should be 176 | reduced at once on the coordinating node. This value should be used 177 | as a protection mechanism to reduce the memory overhead per search 178 | request if the potential number of shards in the request can be 179 | large., default 512 180 | :arg default_operator: The default operator for query string query (AND 181 | or OR), default 'OR', valid choices are: 'AND', 'OR' 182 | :arg df: The field to use as default where no field prefix is given in 183 | the query string 184 | :arg docvalue_fields: A comma-separated list of fields to return as the 185 | docvalue representation of a field for each hit 186 | :arg expand_wildcards: Whether to expand wildcard expression to concrete 187 | indices that are open, closed or both., default 'open', valid 188 | choices are: 'open', 'closed', 'none', 'all' 189 | :arg explain: Specify whether to return detailed information about score 190 | computation as part of a hit 191 | :arg from\\_: Starting offset (default: 0) 192 | :arg ignore_unavailable: Whether specified concrete indices should be 193 | ignored when unavailable (missing or closed) 194 | :arg lenient: Specify whether format-based query failures (such as 195 | providing text to a numeric field) should be ignored 196 | :arg max_concurrent_shard_requests: The number of concurrent shard 197 | requests this search executes concurrently. This value should be 198 | used to limit the impact of the search on the cluster in order to 199 | limit the number of concurrent shard requests, default 'The default 200 | grows with the number of nodes in the cluster but is at most 256.' 201 | :arg pre_filter_shard_size: A threshold that enforces a pre-filter 202 | roundtrip to prefilter search shards based on query rewriting if 203 | the number of shards the search request expands to exceeds the 204 | threshold. This filter roundtrip can limit the number of shards 205 | significantly if for instance a shard can not match any documents 206 | based on it's rewrite method ie. 
if date filters are mandatory to 207 | match but the shard bounds and the query are disjoint., default 128 208 | :arg preference: Specify the node or shard the operation should be 209 | performed on (default: random) 210 | :arg q: Query in the Lucene query string syntax 211 | :arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number 212 | in the response. This param is added version 6.x to handle mixed cluster queries where nodes 213 | are in multiple versions (7.0 and 6.latest) 214 | :arg request_cache: Specify if request cache should be used for this 215 | request or not, defaults to index level setting 216 | :arg routing: A comma-separated list of specific routing values 217 | :arg scroll: Specify how long a consistent view of the index should be 218 | maintained for scrolled search 219 | :arg search_type: Search operation type, valid choices are: 220 | 'query_then_fetch', 'dfs_query_then_fetch' 221 | :arg size: Number of hits to return (default: 10) 222 | :arg sort: A comma-separated list of : pairs 223 | :arg stats: Specific 'tag' of the request for logging and statistical 224 | purposes 225 | :arg stored_fields: A comma-separated list of stored fields to return as 226 | part of a hit 227 | :arg suggest_field: Specify which field to use for suggestions 228 | :arg suggest_mode: Specify suggest mode, default 'missing', valid 229 | choices are: 'missing', 'popular', 'always' 230 | :arg suggest_size: How many suggestions to return in response 231 | :arg suggest_text: The source text for which the suggestions should be 232 | returned 233 | :arg terminate_after: The maximum number of documents to collect for 234 | each shard, upon reaching which the query execution will terminate 235 | early. 236 | :arg timeout: Explicit operation timeout 237 | :arg track_scores: Whether to calculate and return scores even if they 238 | are not used for sorting 239 | :arg track_total_hits: Indicate if the number of documents that match 240 | the query should be tracked 241 | :arg typed_keys: Specify whether aggregation and suggester names should 242 | be prefixed by their respective types in the response 243 | :arg version: Specify whether to return document version as part of a 244 | hit 245 | """ 246 | # from is a reserved word so it cannot be used, use from_ instead 247 | if "from_" in params: 248 | params["from"] = params.pop("from_") 249 | 250 | if not index: 251 | index = "_all" 252 | res = self.transport.perform_request( 253 | "GET", _make_path(index, doc_type, "_search"), params=params, body=body 254 | ) 255 | if type(res) == list or type(res) == tuple: 256 | return res[1] 257 | return res 258 | -------------------------------------------------------------------------------- /elastalert/auth.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import boto3 4 | from aws_requests_auth.aws_auth import AWSRequestsAuth 5 | 6 | 7 | class RefeshableAWSRequestsAuth(AWSRequestsAuth): 8 | """ 9 | A class ensuring that AWS request signing uses a refreshed credential 10 | """ 11 | 12 | def __init__(self, 13 | refreshable_credential, 14 | aws_host, 15 | aws_region, 16 | aws_service): 17 | """ 18 | :param refreshable_credential: A credential class that refreshes STS or IAM Instance Profile credentials 19 | :type refreshable_credential: :class:`botocore.credentials.RefreshableCredentials` 20 | """ 21 | self.refreshable_credential = refreshable_credential 22 | self.aws_host = aws_host 23 | self.aws_region = 
aws_region 24 | self.service = aws_service 25 | 26 | @property 27 | def aws_access_key(self): 28 | return self.refreshable_credential.access_key 29 | 30 | @property 31 | def aws_secret_access_key(self): 32 | return self.refreshable_credential.secret_key 33 | 34 | @property 35 | def aws_token(self): 36 | return self.refreshable_credential.token 37 | 38 | 39 | class Auth(object): 40 | 41 | def __call__(self, host, username, password, aws_region, profile_name): 42 | """ Return the authorization header. 43 | 44 | :param host: Elasticsearch host. 45 | :param username: Username used for authenticating the requests to Elasticsearch. 46 | :param password: Password used for authenticating the requests to Elasticsearch. 47 | :param aws_region: AWS Region to use. Only required when signing requests. 48 | :param profile_name: AWS profile to use for connecting. Only required when signing requests. 49 | """ 50 | if username and password: 51 | return username + ':' + password 52 | 53 | if not aws_region and not os.environ.get('AWS_DEFAULT_REGION'): 54 | return None 55 | 56 | session = boto3.session.Session(profile_name=profile_name, region_name=aws_region) 57 | 58 | return RefeshableAWSRequestsAuth( 59 | refreshable_credential=session.get_credentials(), 60 | aws_host=host, 61 | aws_region=session.region_name, 62 | aws_service='es') 63 | -------------------------------------------------------------------------------- /elastalert/config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | import logging 4 | import logging.config 5 | 6 | from envparse import Env 7 | from staticconf.loader import yaml_loader 8 | 9 | from . import loaders 10 | from .util import EAException 11 | from .util import elastalert_logger 12 | from .util import get_module 13 | 14 | # Required global (config.yaml) configuration options 15 | required_globals = frozenset(['run_every', 'es_host', 'es_port', 'writeback_index', 'buffer_time']) 16 | 17 | # Settings that can be derived from ENV variables 18 | env_settings = {'ES_USE_SSL': 'use_ssl', 19 | 'ES_PASSWORD': 'es_password', 20 | 'ES_USERNAME': 'es_username', 21 | 'ES_HOST': 'es_host', 22 | 'ES_PORT': 'es_port', 23 | 'ES_URL_PREFIX': 'es_url_prefix'} 24 | 25 | env = Env(ES_USE_SSL=bool) 26 | 27 | 28 | # Used to map the names of rule loaders to their classes 29 | loader_mapping = { 30 | 'file': loaders.FileRulesLoader, 31 | } 32 | 33 | 34 | def load_conf(args, defaults=None, overwrites=None): 35 | """ Creates a conf dictionary for ElastAlerter. Loads the global 36 | config file and then each rule found in rules_folder. 37 | 38 | :param args: The parsed arguments to ElastAlert 39 | :param defaults: Dictionary of default conf values 40 | :param overwrites: Dictionary of conf values to override 41 | :return: The global configuration, a dictionary. 
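    Precedence (highest first): explicit ``overwrites``, then ``ES_*`` environment variables, then values from the config file; ``defaults`` only fill keys that are still missing.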
42 | """ 43 | filename = args.config 44 | if filename: 45 | conf = yaml_loader(filename) 46 | else: 47 | try: 48 | conf = yaml_loader('config.yaml') 49 | except FileNotFoundError: 50 | raise EAException('No --config or config.yaml found') 51 | 52 | # init logging from config and set log levels according to command line options 53 | configure_logging(args, conf) 54 | 55 | for env_var, conf_var in list(env_settings.items()): 56 | val = env(env_var, None) 57 | if val is not None: 58 | conf[conf_var] = val 59 | 60 | for key, value in (iter(defaults.items()) if defaults is not None else []): 61 | if key not in conf: 62 | conf[key] = value 63 | 64 | for key, value in (iter(overwrites.items()) if overwrites is not None else []): 65 | conf[key] = value 66 | 67 | # Make sure we have all required globals 68 | if required_globals - frozenset(list(conf.keys())): 69 | raise EAException('%s must contain %s' % (filename, ', '.join(required_globals - frozenset(list(conf.keys()))))) 70 | 71 | conf.setdefault('writeback_alias', 'elastalert_alerts') 72 | conf.setdefault('max_query_size', 10000) 73 | conf.setdefault('scroll_keepalive', '30s') 74 | conf.setdefault('max_scrolling_count', 0) 75 | conf.setdefault('disable_rules_on_error', True) 76 | conf.setdefault('scan_subdirectories', True) 77 | conf.setdefault('rules_loader', 'file') 78 | 79 | # Convert run_every, buffer_time into a timedelta object 80 | try: 81 | conf['run_every'] = datetime.timedelta(**conf['run_every']) 82 | conf['buffer_time'] = datetime.timedelta(**conf['buffer_time']) 83 | if 'alert_time_limit' in conf: 84 | conf['alert_time_limit'] = datetime.timedelta(**conf['alert_time_limit']) 85 | else: 86 | conf['alert_time_limit'] = datetime.timedelta(days=2) 87 | if 'old_query_limit' in conf: 88 | conf['old_query_limit'] = datetime.timedelta(**conf['old_query_limit']) 89 | else: 90 | conf['old_query_limit'] = datetime.timedelta(weeks=1) 91 | except (KeyError, TypeError) as e: 92 | raise EAException('Invalid time format used: %s' % e) 93 | 94 | # Initialise the rule loader and load each rule configuration 95 | rules_loader_class = loader_mapping.get(conf['rules_loader']) or get_module(conf['rules_loader']) 96 | rules_loader = rules_loader_class(conf) 97 | conf['rules_loader'] = rules_loader 98 | # Make sure we have all the required globals for the loader 99 | # Make sure we have all required globals 100 | if rules_loader.required_globals - frozenset(list(conf.keys())): 101 | raise EAException( 102 | '%s must contain %s' % (filename, ', '.join(rules_loader.required_globals - frozenset(list(conf.keys()))))) 103 | 104 | return conf 105 | 106 | 107 | def configure_logging(args, conf): 108 | # configure logging from config file if provided 109 | if 'logging' in conf: 110 | # load new logging config 111 | logging.config.dictConfig(conf['logging']) 112 | 113 | if args.verbose and args.debug: 114 | elastalert_logger.info( 115 | "Note: --debug and --verbose flags are set. --debug takes precedent." 116 | ) 117 | 118 | # re-enable INFO log level on elastalert_logger in verbose/debug mode 119 | # (but don't touch it if it is already set to INFO or below by config) 120 | if args.verbose or args.debug: 121 | if elastalert_logger.level > logging.INFO or elastalert_logger.level == logging.NOTSET: 122 | elastalert_logger.setLevel(logging.INFO) 123 | 124 | if args.debug: 125 | elastalert_logger.info( 126 | """Note: In debug mode, alerts will be logged to console but NOT actually sent. 
127 | To send them but remain verbose, use --verbose instead.""" 128 | ) 129 | 130 | if not args.es_debug and 'logging' not in conf: 131 | logging.getLogger('elasticsearch').setLevel(logging.WARNING) 132 | 133 | if args.es_debug_trace: 134 | tracer = logging.getLogger('elasticsearch.trace') 135 | tracer.setLevel(logging.INFO) 136 | tracer.addHandler(logging.FileHandler(args.es_debug_trace)) 137 | -------------------------------------------------------------------------------- /elastalert/create_index.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import argparse 4 | import getpass 5 | import json 6 | import os 7 | import time 8 | 9 | import elasticsearch.helpers 10 | import yaml 11 | from elasticsearch import RequestsHttpConnection 12 | from elasticsearch.client import Elasticsearch 13 | from elasticsearch.client import IndicesClient 14 | from elasticsearch.exceptions import NotFoundError 15 | from envparse import Env 16 | 17 | from .auth import Auth 18 | 19 | env = Env(ES_USE_SSL=bool) 20 | 21 | 22 | def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None): 23 | esversion = es_client.info()["version"]["number"] 24 | print("Elastic Version: " + esversion) 25 | 26 | es_index_mappings = read_es_index_mappings() if is_atleastsix(esversion) else read_es_index_mappings(5) 27 | 28 | es_index = IndicesClient(es_client) 29 | if not recreate: 30 | if es_index.exists(ea_index): 31 | print('Index ' + ea_index + ' already exists. Skipping index creation.') 32 | return None 33 | 34 | # (Re-)Create indices. 35 | if is_atleastsix(esversion): 36 | index_names = ( 37 | ea_index, 38 | ea_index + '_status', 39 | ea_index + '_silence', 40 | ea_index + '_error', 41 | ea_index + '_past', 42 | ) 43 | else: 44 | index_names = ( 45 | ea_index, 46 | ) 47 | for index_name in index_names: 48 | if es_index.exists(index_name): 49 | print('Deleting index ' + index_name + '.') 50 | try: 51 | es_index.delete(index_name) 52 | except NotFoundError: 53 | # Why does this ever occur?? It shouldn't. But it does. 54 | pass 55 | es_index.create(index_name) 56 | 57 | # To avoid a race condition. 
TODO: replace this with a real check
    time.sleep(2)

    if is_atleastseven(esversion):
        # TODO remove doc_type completely when the elasticsearch client allows doc_type=None
        # doc_type is a deprecated feature and will be completely removed in Elasticsearch 8
        es_client.indices.put_mapping(index=ea_index, doc_type='_doc',
                                      body=es_index_mappings['elastalert'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc',
                                      body=es_index_mappings['elastalert_status'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='_doc',
                                      body=es_index_mappings['silence'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_error', doc_type='_doc',
                                      body=es_index_mappings['elastalert_error'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc',
                                      body=es_index_mappings['past_elastalert'], include_type_name=True)
    elif is_atleastsixtwo(esversion):
        es_client.indices.put_mapping(index=ea_index, doc_type='_doc',
                                      body=es_index_mappings['elastalert'])
        es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc',
                                      body=es_index_mappings['elastalert_status'])
        es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='_doc',
                                      body=es_index_mappings['silence'])
        es_client.indices.put_mapping(index=ea_index + '_error', doc_type='_doc',
                                      body=es_index_mappings['elastalert_error'])
        es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc',
                                      body=es_index_mappings['past_elastalert'])
    elif is_atleastsix(esversion):
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert',
                                      body=es_index_mappings['elastalert'])
        es_client.indices.put_mapping(index=ea_index + '_status', doc_type='elastalert_status',
                                      body=es_index_mappings['elastalert_status'])
        es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='silence',
                                      body=es_index_mappings['silence'])
        es_client.indices.put_mapping(index=ea_index + '_error', doc_type='elastalert_error',
                                      body=es_index_mappings['elastalert_error'])
        es_client.indices.put_mapping(index=ea_index + '_past', doc_type='past_elastalert',
                                      body=es_index_mappings['past_elastalert'])
    else:
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert',
                                      body=es_index_mappings['elastalert'])
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_status',
                                      body=es_index_mappings['elastalert_status'])
        es_client.indices.put_mapping(index=ea_index, doc_type='silence',
                                      body=es_index_mappings['silence'])
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_error',
                                      body=es_index_mappings['elastalert_error'])
        es_client.indices.put_mapping(index=ea_index, doc_type='past_elastalert',
                                      body=es_index_mappings['past_elastalert'])

    print('New index %s created' % ea_index)
    if old_ea_index:
        print("Copying all data from old index '{0}' to new index '{1}'".format(old_ea_index, ea_index))
        # Use the defaults for chunk_size, scroll, scan_kwargs, and bulk_kwargs
        elasticsearch.helpers.reindex(es_client, old_ea_index, ea_index)

    print('Done!')


def read_es_index_mappings(es_version=6):
    print('Reading Elastic {0} index mappings:'.format(es_version))
    return {
        'silence': read_es_index_mapping('silence', es_version),
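        # One JSON mapping file per doc type, loaded from es_mappings/<major version>/<name>.json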
120 | 'elastalert_status': read_es_index_mapping('elastalert_status', es_version), 121 | 'elastalert': read_es_index_mapping('elastalert', es_version), 122 | 'past_elastalert': read_es_index_mapping('past_elastalert', es_version), 123 | 'elastalert_error': read_es_index_mapping('elastalert_error', es_version) 124 | } 125 | 126 | 127 | def read_es_index_mapping(mapping, es_version=6): 128 | base_path = os.path.abspath(os.path.dirname(__file__)) 129 | mapping_path = 'es_mappings/{0}/{1}.json'.format(es_version, mapping) 130 | path = os.path.join(base_path, mapping_path) 131 | with open(path, 'r') as f: 132 | print("Reading index mapping '{0}'".format(mapping_path)) 133 | return json.load(f) 134 | 135 | 136 | def is_atleastsix(es_version): 137 | return int(es_version.split(".")[0]) >= 6 138 | 139 | 140 | def is_atleastsixtwo(es_version): 141 | major, minor = list(map(int, es_version.split(".")[:2])) 142 | return major > 6 or (major == 6 and minor >= 2) 143 | 144 | 145 | def is_atleastseven(es_version): 146 | return int(es_version.split(".")[0]) >= 7 147 | 148 | 149 | def main(): 150 | parser = argparse.ArgumentParser() 151 | parser.add_argument('--host', default=os.environ.get('ES_HOST', None), help='Elasticsearch host') 152 | parser.add_argument('--port', default=os.environ.get('ES_PORT', None), type=int, help='Elasticsearch port') 153 | parser.add_argument('--username', default=os.environ.get('ES_USERNAME', None), help='Elasticsearch username') 154 | parser.add_argument('--password', default=os.environ.get('ES_PASSWORD', None), help='Elasticsearch password') 155 | parser.add_argument('--url-prefix', help='Elasticsearch URL prefix') 156 | parser.add_argument('--no-auth', action='store_const', const=True, help='Suppress prompt for basic auth') 157 | parser.add_argument('--ssl', action='store_true', default=env('ES_USE_SSL', None), help='Use TLS') 158 | parser.add_argument('--no-ssl', dest='ssl', action='store_false', help='Do not use TLS') 159 | parser.add_argument('--verify-certs', action='store_true', default=None, help='Verify TLS certificates') 160 | parser.add_argument('--no-verify-certs', dest='verify_certs', action='store_false', 161 | help='Do not verify TLS certificates') 162 | parser.add_argument('--index', help='Index name to create') 163 | parser.add_argument('--alias', help='Alias name to create') 164 | parser.add_argument('--old-index', help='Old index name to copy') 165 | parser.add_argument('--send_get_body_as', default='GET', 166 | help='Method for querying Elasticsearch - POST, GET or source') 167 | parser.add_argument( 168 | '--boto-profile', 169 | default=None, 170 | dest='profile', 171 | help='DEPRECATED: (use --profile) Boto profile to use for signing requests') 172 | parser.add_argument( 173 | '--profile', 174 | default=None, 175 | help='AWS profile to use for signing requests. Optionally use the AWS_DEFAULT_PROFILE environment variable') 176 | parser.add_argument( 177 | '--aws-region', 178 | default=None, 179 | help='AWS Region to use for signing requests. 
Optionally use the AWS_DEFAULT_REGION environment variable') 180 | parser.add_argument('--timeout', default=60, type=int, help='Elasticsearch request timeout') 181 | parser.add_argument('--config', default='config.yaml', help='Global config file (default: config.yaml)') 182 | parser.add_argument('--recreate', action='store_true', default=False,  # was type=bool, which coerces any non-empty string (even 'False') to True 183 | help='Force re-creation of the index (this will cause data loss).') 184 | args = parser.parse_args() 185 | 186 | if os.path.isfile(args.config): 187 | filename = args.config 188 | elif os.path.isfile('../config.yaml'): 189 | filename = '../config.yaml' 190 | else: 191 | filename = '' 192 | 193 | if filename: 194 | with open(filename) as config_file: 195 | data = yaml.load(config_file, Loader=yaml.FullLoader) 196 | host = args.host if args.host else data.get('es_host') 197 | port = args.port if args.port else data.get('es_port') 198 | username = args.username if args.username else data.get('es_username') 199 | password = args.password if args.password else data.get('es_password') 200 | url_prefix = args.url_prefix if args.url_prefix is not None else data.get('es_url_prefix', '') 201 | use_ssl = args.ssl if args.ssl is not None else data.get('use_ssl') 202 | verify_certs = args.verify_certs if args.verify_certs is not None else data.get('verify_certs') is not False 203 | aws_region = data.get('aws_region', None) 204 | send_get_body_as = data.get('send_get_body_as', 'GET') 205 | ca_certs = data.get('ca_certs') 206 | client_cert = data.get('client_cert') 207 | client_key = data.get('client_key') 208 | index = args.index if args.index is not None else data.get('writeback_index') 209 | alias = args.alias if args.alias is not None else data.get('writeback_alias') 210 | old_index = args.old_index if args.old_index is not None else None 211 | else: 212 | username = args.username if args.username else None 213 | password = args.password if args.password else None 214 | aws_region = args.aws_region 215 | host = args.host if args.host else input('Enter Elasticsearch host: ') 216 | port = args.port if args.port else int(input('Enter Elasticsearch port: ')) 217 | use_ssl = (args.ssl if args.ssl is not None 218 | else input('Use SSL? t/f: ').lower() in ('t', 'true')) 219 | if use_ssl: 220 | verify_certs = (args.verify_certs if args.verify_certs is not None 221 | else input('Verify TLS certificates? t/f: ').lower() not in ('f', 'false')) 222 | else: 223 | verify_certs = True 224 | if args.no_auth is None and username is None: 225 | username = input('Enter optional basic-auth username (or leave blank): ') 226 | password = getpass.getpass('Enter optional basic-auth password (or leave blank): ') 227 | url_prefix = (args.url_prefix if args.url_prefix is not None 228 | else input('Enter optional Elasticsearch URL prefix (prepends a string to the URL of every request): ')) 229 | send_get_body_as = args.send_get_body_as 230 | ca_certs = None 231 | client_cert = None 232 | client_key = None 233 | index = args.index if args.index is not None else input('New index name? (Default elastalert_status) ') 234 | if not index: 235 | index = 'elastalert_status' 236 | alias = args.alias if args.alias is not None else input('New alias name? (Default elastalert_alerts) ') 237 | if not alias: 238 | alias = 'elastalert_alerts'  # was 'elastalert_alias', contradicting the default advertised in the prompt above 239 | old_index = (args.old_index if args.old_index is not None 240 | else input('Name of existing index to copy?
(Default None) ')) 241 | 242 | timeout = args.timeout 243 | 244 | auth = Auth() 245 | http_auth = auth(host=host, 246 | username=username, 247 | password=password, 248 | aws_region=aws_region, 249 | profile_name=args.profile) 250 | es = Elasticsearch( 251 | host=host, 252 | port=port, 253 | timeout=timeout, 254 | use_ssl=use_ssl, 255 | verify_certs=verify_certs, 256 | connection_class=RequestsHttpConnection, 257 | http_auth=http_auth, 258 | url_prefix=url_prefix, 259 | send_get_body_as=send_get_body_as, 260 | client_cert=client_cert, 261 | ca_certs=ca_certs, 262 | client_key=client_key) 263 | 264 | create_index_mappings(es_client=es, ea_index=index, recreate=args.recreate, old_ea_index=old_index) 265 | 266 | 267 | if __name__ == '__main__': 268 | main() 269 | -------------------------------------------------------------------------------- /elastalert/enhancements.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from .util import pretty_ts 3 | 4 | 5 | class BaseEnhancement(object): 6 | """ Enhancements take a match dictionary object and modify it in some way to 7 | enhance an alert. These are specified in each rule under the match_enhancements option. 8 | Generally, the key-value pairs in the match dictionary will be contained in the alert body. """ 9 | 10 | def __init__(self, rule): 11 | self.rule = rule 12 | 13 | def process(self, match): 14 | """ Modify the contents of match, a dictionary, in some way """ 15 | raise NotImplementedError() 16 | 17 | 18 | class TimeEnhancement(BaseEnhancement): 19 | def process(self, match): 20 | match['@timestamp'] = pretty_ts(match['@timestamp']) 21 | 22 | 23 | class DropMatchException(Exception): 24 | """ ElastAlert will drop a match if this exception type is raised by an enhancement """ 25 | pass 26 | -------------------------------------------------------------------------------- /elastalert/es_mappings/5/elastalert.json: -------------------------------------------------------------------------------- 1 | { 2 | "elastalert": { 3 | "properties": { 4 | "rule_name": { 5 | "index": "not_analyzed", 6 | "type": "string" 7 | }, 8 | "@timestamp": { 9 | "type": "date", 10 | "format": "dateOptionalTime" 11 | }, 12 | "alert_time": { 13 | "type": "date", 14 | "format": "dateOptionalTime" 15 | }, 16 | "match_time": { 17 | "type": "date", 18 | "format": "dateOptionalTime" 19 | }, 20 | "match_body": { 21 | "type": "object", 22 | "enabled": "false" 23 | }, 24 | "aggregate_id": { 25 | "index": "not_analyzed", 26 | "type": "string" 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /elastalert/es_mappings/5/elastalert_error.json: -------------------------------------------------------------------------------- 1 | { 2 | "elastalert_error": { 3 | "properties": { 4 | "data": { 5 | "type": "object", 6 | "enabled": "false" 7 | }, 8 | "@timestamp": { 9 | "type": "date", 10 | "format": "dateOptionalTime" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /elastalert/es_mappings/5/elastalert_status.json: -------------------------------------------------------------------------------- 1 | { 2 | "elastalert_status": { 3 | "properties": { 4 | "rule_name": { 5 | "index": "not_analyzed", 6 | "type": "string" 7 | }, 8 | "@timestamp": { 9 | "type": "date", 10 | "format": "dateOptionalTime" 11 | } 12 | } 13 | } 14 | } 15 |
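The BaseEnhancement/DropMatchException API in enhancements.py above is the extension point that rules reference via the match_enhancements option. As a minimal sketch of a custom enhancement (the module path elastalert_modules.my_enhancements, the src_ip field, and the drop condition are illustrative assumptions, not part of this repo):

from elastalert.enhancements import BaseEnhancement
from elastalert.enhancements import DropMatchException


class FilterInternalIPs(BaseEnhancement):
    """ Hypothetical enhancement: drops matches from an assumed internal 10.0.0.0/8 range. """

    def process(self, match):
        src_ip = match.get('src_ip', '')  # 'src_ip' is an assumed document field
        if src_ip.startswith('10.'):
            # Raising DropMatchException tells ElastAlert to discard this match entirely
            raise DropMatchException()
        # Keys added to the match dictionary end up in the alert body
        match['src_ip_scope'] = 'external'

A rule would then load it with match_enhancements: ["elastalert_modules.my_enhancements.FilterInternalIPs"], assuming that module is importable from ElastAlert's working directory.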
-------------------------------------------------------------------------------- /elastalert/es_mappings/5/past_elastalert.json: -------------------------------------------------------------------------------- 1 | { 2 | "past_elastalert": { 3 | "properties": { 4 | "rule_name": { 5 | "index": "not_analyzed", 6 | "type": "string" 7 | }, 8 | "match_body": { 9 | "type": "object", 10 | "enabled": "false" 11 | }, 12 | "@timestamp": { 13 | "type": "date", 14 | "format": "dateOptionalTime" 15 | }, 16 | "aggregate_id": { 17 | "index": "not_analyzed", 18 | "type": "string" 19 | } 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /elastalert/es_mappings/5/silence.json: -------------------------------------------------------------------------------- 1 | { 2 | "silence": { 3 | "properties": { 4 | "rule_name": { 5 | "index": "not_analyzed", 6 | "type": "string" 7 | }, 8 | "until": { 9 | "type": "date", 10 | "format": "dateOptionalTime" 11 | }, 12 | "@timestamp": { 13 | "type": "date", 14 | "format": "dateOptionalTime" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /elastalert/es_mappings/6/elastalert.json: -------------------------------------------------------------------------------- 1 | { 2 | "numeric_detection": true, 3 | "date_detection": false, 4 | "dynamic_templates": [ 5 | { 6 | "strings_as_keyword": { 7 | "mapping": { 8 | "ignore_above": 1024, 9 | "type": "keyword" 10 | }, 11 | "match_mapping_type": "string" 12 | } 13 | } 14 | ], 15 | "properties": { 16 | "rule_name": { 17 | "type": "keyword" 18 | }, 19 | "@timestamp": { 20 | "type": "date", 21 | "format": "dateOptionalTime" 22 | }, 23 | "alert_time": { 24 | "type": "date", 25 | "format": "dateOptionalTime" 26 | }, 27 | "match_time": { 28 | "type": "date", 29 | "format": "dateOptionalTime" 30 | }, 31 | "match_body": { 32 | "type": "object" 33 | }, 34 | "aggregate_id": { 35 | "type": "keyword" 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /elastalert/es_mappings/6/elastalert_error.json: -------------------------------------------------------------------------------- 1 | { 2 | "properties": { 3 | "data": { 4 | "type": "object", 5 | "enabled": "false" 6 | }, 7 | "@timestamp": { 8 | "type": "date", 9 | "format": "dateOptionalTime" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /elastalert/es_mappings/6/elastalert_status.json: -------------------------------------------------------------------------------- 1 | { 2 | "properties": { 3 | "rule_name": { 4 | "type": "keyword" 5 | }, 6 | "@timestamp": { 7 | "type": "date", 8 | "format": "dateOptionalTime" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /elastalert/es_mappings/6/past_elastalert.json: -------------------------------------------------------------------------------- 1 | { 2 | "properties": { 3 | "rule_name": { 4 | "type": "keyword" 5 | }, 6 | "match_body": { 7 | "type": "object", 8 | "enabled": "false" 9 | }, 10 | "@timestamp": { 11 | "type": "date", 12 | "format": "dateOptionalTime" 13 | }, 14 | "aggregate_id": { 15 | "type": "keyword" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /elastalert/es_mappings/6/silence.json: -------------------------------------------------------------------------------- 1 | { 2 | "properties": { 3 | 
"rule_name": { 4 | "type": "keyword" 5 | }, 6 | "until": { 7 | "type": "date", 8 | "format": "dateOptionalTime" 9 | }, 10 | "@timestamp": { 11 | "type": "date", 12 | "format": "dateOptionalTime" 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /elastalert/kibana_discover.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # flake8: noqa 3 | import datetime 4 | import logging 5 | import json 6 | import os.path 7 | import prison 8 | import urllib.parse 9 | 10 | from .util import EAException 11 | from .util import lookup_es_key 12 | from .util import ts_add 13 | 14 | kibana_default_timedelta = datetime.timedelta(minutes=10) 15 | 16 | kibana5_kibana6_versions = frozenset(['5.6', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8']) 17 | kibana7_versions = frozenset(['7.0', '7.1', '7.2', '7.3']) 18 | 19 | def generate_kibana_discover_url(rule, match): 20 | ''' Creates a link for a kibana discover app. ''' 21 | 22 | discover_app_url = rule.get('kibana_discover_app_url') 23 | if not discover_app_url: 24 | logging.warning( 25 | 'Missing kibana_discover_app_url for rule %s' % ( 26 | rule.get('name', '') 27 | ) 28 | ) 29 | return None 30 | 31 | kibana_version = rule.get('kibana_discover_version') 32 | if not kibana_version: 33 | logging.warning( 34 | 'Missing kibana_discover_version for rule %s' % ( 35 | rule.get('name', '') 36 | ) 37 | ) 38 | return None 39 | 40 | index = rule.get('kibana_discover_index_pattern_id') 41 | if not index: 42 | logging.warning( 43 | 'Missing kibana_discover_index_pattern_id for rule %s' % ( 44 | rule.get('name', '') 45 | ) 46 | ) 47 | return None 48 | 49 | columns = rule.get('kibana_discover_columns', ['_source']) 50 | filters = rule.get('filter', []) 51 | 52 | if 'query_key' in rule: 53 | query_keys = rule.get('compound_query_key', [rule['query_key']]) 54 | else: 55 | query_keys = [] 56 | 57 | timestamp = lookup_es_key(match, rule['timestamp_field']) 58 | timeframe = rule.get('timeframe', kibana_default_timedelta) 59 | from_timedelta = rule.get('kibana_discover_from_timedelta', timeframe) 60 | from_time = ts_add(timestamp, -from_timedelta) 61 | to_timedelta = rule.get('kibana_discover_to_timedelta', timeframe) 62 | to_time = ts_add(timestamp, to_timedelta) 63 | 64 | if kibana_version in kibana5_kibana6_versions: 65 | globalState = kibana6_disover_global_state(from_time, to_time) 66 | appState = kibana_discover_app_state(index, columns, filters, query_keys, match) 67 | 68 | elif kibana_version in kibana7_versions: 69 | globalState = kibana7_disover_global_state(from_time, to_time) 70 | appState = kibana_discover_app_state(index, columns, filters, query_keys, match) 71 | 72 | else: 73 | logging.warning( 74 | 'Unknown kibana discover application version %s for rule %s' % ( 75 | kibana_version, 76 | rule.get('name', '') 77 | ) 78 | ) 79 | return None 80 | 81 | return "%s?_g=%s&_a=%s" % ( 82 | os.path.expandvars(discover_app_url), 83 | urllib.parse.quote(globalState), 84 | urllib.parse.quote(appState) 85 | ) 86 | 87 | 88 | def kibana6_disover_global_state(from_time, to_time): 89 | return prison.dumps( { 90 | 'refreshInterval': { 91 | 'pause': True, 92 | 'value': 0 93 | }, 94 | 'time': { 95 | 'from': from_time, 96 | 'mode': 'absolute', 97 | 'to': to_time 98 | } 99 | } ) 100 | 101 | 102 | def kibana7_disover_global_state(from_time, to_time): 103 | return prison.dumps( { 104 | 'filters': [], 105 | 'refreshInterval': { 106 | 'pause': True, 107 | 
'value': 0 108 | }, 109 | 'time': { 110 | 'from': from_time, 111 | 'to': to_time 112 | } 113 | } ) 114 | 115 | 116 | def kibana_discover_app_state(index, columns, filters, query_keys, match): 117 | app_filters = [] 118 | 119 | if filters: 120 | bool_filter = { 'must': filters } 121 | app_filters.append( { 122 | '$state': { 123 | 'store': 'appState' 124 | }, 125 | 'bool': bool_filter, 126 | 'meta': { 127 | 'alias': 'filter', 128 | 'disabled': False, 129 | 'index': index, 130 | 'key': 'bool', 131 | 'negate': False, 132 | 'type': 'custom', 133 | 'value': json.dumps(bool_filter, separators=(',', ':')) 134 | }, 135 | } ) 136 | 137 | for query_key in query_keys: 138 | query_value = lookup_es_key(match, query_key) 139 | 140 | if query_value is None: 141 | app_filters.append( { 142 | '$state': { 143 | 'store': 'appState' 144 | }, 145 | 'exists': { 146 | 'field': query_key 147 | }, 148 | 'meta': { 149 | 'alias': None, 150 | 'disabled': False, 151 | 'index': index, 152 | 'key': query_key, 153 | 'negate': True, 154 | 'type': 'exists', 155 | 'value': 'exists' 156 | } 157 | } ) 158 | 159 | else: 160 | app_filters.append( { 161 | '$state': { 162 | 'store': 'appState' 163 | }, 164 | 'meta': { 165 | 'alias': None, 166 | 'disabled': False, 167 | 'index': index, 168 | 'key': query_key, 169 | 'negate': False, 170 | 'params': { 171 | 'query': query_value, 172 | 'type': 'phrase' 173 | }, 174 | 'type': 'phrase', 175 | 'value': str(query_value) 176 | }, 177 | 'query': { 178 | 'match': { 179 | query_key: { 180 | 'query': query_value, 181 | 'type': 'phrase' 182 | } 183 | } 184 | } 185 | } ) 186 | 187 | return prison.dumps( { 188 | 'columns': columns, 189 | 'filters': app_filters, 190 | 'index': index, 191 | 'interval': 'auto' 192 | } ) 193 | -------------------------------------------------------------------------------- /elastalert/opsgenie.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import json 3 | import logging 4 | import os.path 5 | import requests 6 | 7 | from .alerts import Alerter 8 | from .alerts import BasicMatchString 9 | from .util import EAException 10 | from .util import elastalert_logger 11 | from .util import lookup_es_key 12 | 13 | 14 | class OpsGenieAlerter(Alerter): 15 | '''Sends an HTTP request to the OpsGenie API to signal for an alert''' 16 | required_options = frozenset(['opsgenie_key']) 17 | 18 | def __init__(self, *args): 19 | super(OpsGenieAlerter, self).__init__(*args) 20 | self.account = self.rule.get('opsgenie_account') 21 | self.api_key = self.rule.get('opsgenie_key', 'key') 22 | self.default_reciepients = self.rule.get('opsgenie_default_receipients', None) 23 | self.recipients = self.rule.get('opsgenie_recipients') 24 | self.recipients_args = self.rule.get('opsgenie_recipients_args') 25 | self.default_teams = self.rule.get('opsgenie_default_teams', None) 26 | self.teams = self.rule.get('opsgenie_teams') 27 | self.teams_args = self.rule.get('opsgenie_teams_args') 28 | self.tags = self.rule.get('opsgenie_tags', []) + ['ElastAlert', self.rule['name']] 29 | self.to_addr = self.rule.get('opsgenie_addr', 'https://api.opsgenie.com/v2/alerts') 30 | self.custom_message = self.rule.get('opsgenie_message') 31 | self.opsgenie_subject = self.rule.get('opsgenie_subject') 32 | self.opsgenie_subject_args = self.rule.get('opsgenie_subject_args') 33 | self.alias = self.rule.get('opsgenie_alias') 34 | self.opsgenie_proxy = self.rule.get('opsgenie_proxy', None) 35 | self.priority = self.rule.get('opsgenie_priority')
self.opsgenie_details = self.rule.get('opsgenie_details', {}) 37 | 38 | def _parse_responders(self, responders, responder_args, matches, default_responders): 39 | if responder_args: 40 | formated_responders = list() 41 | responders_values = dict((k, lookup_es_key(matches[0], v)) for k, v in responder_args.items()) 42 | responders_values = dict((k, v) for k, v in responders_values.items() if v) 43 | 44 | for responder in responders: 45 | responder = str(responder) 46 | try: 47 | formated_responders.append(responder.format(**responders_values)) 48 | except KeyError as error: 49 | logging.warn("OpsGenieAlerter: Cannot create responder for OpsGenie Alert. Key not found: %s. " % (error)) 50 | if not formated_responders: 51 | logging.warn("OpsGenieAlerter: no responders can be formed. Trying the default responder ") 52 | if not default_responders: 53 | logging.warn("OpsGenieAlerter: default responder not set. Falling back") 54 | formated_responders = responders 55 | else: 56 | formated_responders = default_responders 57 | responders = formated_responders 58 | return responders 59 | 60 | def _fill_responders(self, responders, type_): 61 | return [{'id': r, 'type': type_} for r in responders] 62 | 63 | def alert(self, matches): 64 | body = '' 65 | for match in matches: 66 | body += str(BasicMatchString(self.rule, match)) 67 | # Separate text of aggregated alerts with dashes 68 | if len(matches) > 1: 69 | body += '\n----------------------------------------\n' 70 | 71 | if self.custom_message is None: 72 | self.message = self.create_title(matches) 73 | else: 74 | self.message = self.custom_message.format(**matches[0]) 75 | self.recipients = self._parse_responders(self.recipients, self.recipients_args, matches, self.default_reciepients) 76 | self.teams = self._parse_responders(self.teams, self.teams_args, matches, self.default_teams) 77 | post = {} 78 | post['message'] = self.message 79 | if self.account: 80 | post['user'] = self.account 81 | if self.recipients: 82 | post['responders'] = [{'username': r, 'type': 'user'} for r in self.recipients] 83 | if self.teams: 84 | post['teams'] = [{'name': r, 'type': 'team'} for r in self.teams] 85 | post['description'] = body 86 | post['source'] = 'ElastAlert' 87 | 88 | for i, tag in enumerate(self.tags): 89 | self.tags[i] = tag.format(**matches[0]) 90 | post['tags'] = self.tags 91 | 92 | if self.priority and self.priority not in ('P1', 'P2', 'P3', 'P4', 'P5'): 93 | logging.warn("Priority level does not appear to be specified correctly.
\ 94 | Please make sure to set it to a value between P1 and P5") 95 | else: 96 | post['priority'] = self.priority 97 | 98 | if self.alias is not None: 99 | post['alias'] = self.alias.format(**matches[0]) 100 | 101 | details = self.get_details(matches) 102 | if details: 103 | post['details'] = details 104 | 105 | logging.debug(json.dumps(post)) 106 | 107 | headers = { 108 | 'Content-Type': 'application/json', 109 | 'Authorization': 'GenieKey {}'.format(self.api_key), 110 | } 111 | # set https proxy, if it was provided 112 | proxies = {'https': self.opsgenie_proxy} if self.opsgenie_proxy else None 113 | 114 | try: 115 | r = requests.post(self.to_addr, json=post, headers=headers, proxies=proxies) 116 | 117 | logging.debug('request response: {0}'.format(r)) 118 | if r.status_code != 202: 119 | elastalert_logger.info("Error response from {0} \n " 120 | "API Response: {1}".format(self.to_addr, r)) 121 | r.raise_for_status() 122 | logging.info("Alert sent to OpsGenie") 123 | except Exception as err: 124 | raise EAException("Error sending alert: {0}".format(err)) 125 | 126 | def create_default_title(self, matches): 127 | subject = 'ElastAlert: %s' % (self.rule['name']) 128 | 129 | # If the rule has a query_key, add that value plus timestamp to subject 130 | if 'query_key' in self.rule: 131 | qk = matches[0].get(self.rule['query_key']) 132 | if qk: 133 | subject += ' - %s' % (qk) 134 | 135 | return subject 136 | 137 | def create_title(self, matches): 138 | """ Creates custom alert title to be used as subject for opsgenie alert.""" 139 | if self.opsgenie_subject: 140 | return self.create_custom_title(matches) 141 | 142 | return self.create_default_title(matches) 143 | 144 | def create_custom_title(self, matches): 145 | opsgenie_subject = str(self.rule['opsgenie_subject']) 146 | 147 | if self.opsgenie_subject_args: 148 | opsgenie_subject_values = [lookup_es_key(matches[0], arg) for arg in self.opsgenie_subject_args] 149 | 150 | for i, subject_value in enumerate(opsgenie_subject_values): 151 | if subject_value is None: 152 | alert_value = self.rule.get(self.opsgenie_subject_args[i]) 153 | if alert_value: 154 | opsgenie_subject_values[i] = alert_value 155 | 156 | opsgenie_subject_values = ['' if val is None else val for val in opsgenie_subject_values] 157 | return opsgenie_subject.format(*opsgenie_subject_values) 158 | 159 | return opsgenie_subject 160 | 161 | def get_info(self): 162 | ret = {'type': 'opsgenie'} 163 | if self.recipients: 164 | ret['recipients'] = self.recipients 165 | if self.account: 166 | ret['account'] = self.account 167 | if self.teams: 168 | ret['teams'] = self.teams 169 | return ret 170 | 171 | def get_details(self, matches): 172 | details = {} 173 | 174 | for key, value in self.opsgenie_details.items(): 175 | 176 | if type(value) is dict: 177 | if 'field' in value: 178 | field_value = lookup_es_key(matches[0], value['field']) 179 | if field_value is not None: 180 | details[key] = str(field_value) 181 | 182 | elif type(value) is str: 183 | details[key] = os.path.expandvars(value) 184 | 185 | return details 186 | -------------------------------------------------------------------------------- /elastalert/rule_from_kibana.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import json 4 | 5 | import yaml 6 | 7 | from elastalert.kibana import filters_from_dashboard 8 | from elastalert.util import elasticsearch_client 9 | 10 | 11 | def main(): 12 | es_host = input("Elasticsearch host: ") 13 | 
es_port = input("Elasticsearch port: ") 14 | db_name = input("Dashboard name: ") 15 | send_get_body_as = input("Method for querying Elasticsearch[GET]: ") or 'GET' 16 | 17 | es = elasticsearch_client({'es_host': es_host, 'es_port': es_port, 'send_get_body_as': send_get_body_as}) 18 | 19 | print("Elasticsearch version: " + es.es_version) 20 | 21 | query = {'query': {'term': {'_id': db_name}}} 22 | 23 | if es.is_atleastsixsix(): 24 | # TODO check support for kibana 7 25 | # TODO use doc_type='_doc' instead 26 | res = es.deprecated_search(index='kibana-int', doc_type='dashboard', body=query, _source_includes=['dashboard']) 27 | else: 28 | res = es.deprecated_search(index='kibana-int', doc_type='dashboard', body=query, _source_include=['dashboard']) 29 | 30 | if not res['hits']['hits']: 31 | print("No dashboard %s found" % (db_name)) 32 | exit() 33 | 34 | db = json.loads(res['hits']['hits'][0]['_source']['dashboard']) 35 | config_filters = filters_from_dashboard(db) 36 | 37 | print("\nPartial Config file") 38 | print("-----------\n") 39 | print("name: %s" % (db_name)) 40 | print("es_host: %s" % (es_host)) 41 | print("es_port: %s" % (es_port)) 42 | print("filter:") 43 | print(yaml.safe_dump(config_filters)) 44 | 45 | 46 | if __name__ == '__main__': 47 | main() 48 | -------------------------------------------------------------------------------- /elastalert/schema.yaml: -------------------------------------------------------------------------------- 1 | $schema: http://json-schema.org/draft-07/schema# 2 | definitions: 3 | 4 | # Either a single string OR an array of strings 5 | arrayOfStrings: &arrayOfString 6 | type: [string, array] 7 | items: {type: string} 8 | 9 | # Either a single string OR an array of strings OR an array of arrays 10 | arrayOfStringsOrOtherArrays: &arrayOfStringsOrOtherArray 11 | type: [string, array] 12 | items: {type: [string, array]} 13 | 14 | timedelta: &timedelta 15 | type: object 16 | additionalProperties: false 17 | properties: 18 | days: {type: number} 19 | weeks: {type: number} 20 | hours: {type: number} 21 | minutes: {type: number} 22 | seconds: {type: number} 23 | milliseconds: {type: number} 24 | 25 | timeFrame: &timeframe 26 | type: object 27 | additionalProperties: false 28 | properties: 29 | days: {type: number} 30 | weeks: {type: number} 31 | hours: {type: number} 32 | minutes: {type: number} 33 | seconds: {type: number} 34 | milliseconds: {type: number} 35 | schedule: {type: string} 36 | 37 | filter: &filter {} 38 | 39 | mattermostField: &mattermostField 40 | type: object 41 | additionalProperties: false 42 | properties: 43 | title: {type: string} 44 | value: {type: string} 45 | args: *arrayOfString 46 | short: {type: boolean} 47 | 48 | required: [type, index, alert] 49 | type: object 50 | 51 | ### Rule Types section 52 | oneOf: 53 | - title: Any 54 | properties: 55 | type: {enum: [any]} 56 | 57 | - title: Blacklist 58 | required: [blacklist, compare_key] 59 | properties: 60 | type: {enum: [blacklist]} 61 | compare_key: {'items': {'type': 'string'},'type': ['string', 'array']} 62 | blacklist: {type: array, items: {type: string}} 63 | 64 | - title: Whitelist 65 | required: [whitelist, compare_key, ignore_null] 66 | properties: 67 | type: {enum: [whitelist]} 68 | compare_key: {'items': {'type': 'string'},'type': ['string', 'array']} 69 | whitelist: {type: array, items: {type: string}} 70 | ignore_null: {type: boolean} 71 | 72 | - title: Change 73 | required: [query_key, compare_key, ignore_null] 74 | properties: 75 | type: {enum: [change]} 76 | compare_key: {'items':
{'type': 'string'},'type': ['string', 'array']} 77 | ignore_null: {type: boolean} 78 | timeframe: *timeframe 79 | 80 | - title: Frequency 81 | required: [num_events, timeframe] 82 | properties: 83 | type: {enum: [frequency]} 84 | num_events: {type: integer} 85 | timeframe: *timeframe 86 | use_count_query: {type: boolean} 87 | doc_type: {type: string} 88 | use_terms_query: {type: boolean} 89 | terms_size: {type: integer} 90 | attach_related: {type: boolean} 91 | 92 | - title: Spike 93 | required: [spike_height, spike_type, timeframe] 94 | properties: 95 | type: {enum: [spike]} 96 | spike_height: {type: number} 97 | spike_type: {enum: ["up", "down", "both"]} 98 | timeframe: *timeframe 99 | use_count_query: {type: boolean} 100 | doc_type: {type: string} 101 | use_terms_query: {type: boolean} 102 | terms_size: {type: integer} 103 | alert_on_new_data: {type: boolean} 104 | threshold_ref: {type: integer} 105 | threshold_cur: {type: integer} 106 | 107 | - title: Spike Aggregation 108 | required: [spike_height, spike_type, timeframe] 109 | properties: 110 | type: {enum: [spike_aggregation]} 111 | spike_height: {type: number} 112 | spike_type: {enum: ["up", "down", "both"]} 113 | metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count"]} 114 | timeframe: *timeframe 115 | use_count_query: {type: boolean} 116 | doc_type: {type: string} 117 | use_terms_query: {type: boolean} 118 | terms_size: {type: integer} 119 | alert_on_new_data: {type: boolean} 120 | threshold_ref: {type: number} 121 | threshold_cur: {type: number} 122 | min_doc_count: {type: integer} 123 | 124 | - title: Flatline 125 | required: [threshold, timeframe] 126 | properties: 127 | type: {enum: [flatline]} 128 | timeframe: *timeframe 129 | threshold: {type: integer} 130 | use_count_query: {type: boolean} 131 | doc_type: {type: string} 132 | 133 | - title: New Term 134 | required: [] 135 | properties: 136 | type: {enum: [new_term]} 137 | fields: *arrayOfStringsOrOtherArray 138 | terms_window_size: *timeframe 139 | alert_on_missing_field: {type: boolean} 140 | use_terms_query: {type: boolean} 141 | terms_size: {type: integer} 142 | 143 | - title: Cardinality 144 | required: [cardinality_field, timeframe] 145 | properties: 146 | type: {enum: [cardinality]} 147 | max_cardinality: {type: integer} 148 | min_cardinality: {type: integer} 149 | cardinality_field: {type: string} 150 | timeframe: *timeframe 151 | 152 | - title: Metric Aggregation 153 | required: [metric_agg_key,metric_agg_type] 154 | properties: 155 | type: {enum: [metric_aggregation]} 156 | metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count"]} 157 | #timeframe: *timeframe 158 | 159 | - title: Percentage Match 160 | required: [match_bucket_filter] 161 | properties: 162 | type: {enum: [percentage_match]} 163 | 164 | - title: Custom Rule from Module 165 | properties: 166 | # custom rules include a period in the rule type 167 | type: {pattern: "[.]"} 168 | 169 | properties: 170 | 171 | # Common Settings 172 | es_host: {type: string} 173 | es_port: {type: integer} 174 | index: {type: string} 175 | name: {type: string} 176 | 177 | use_ssl: {type: boolean} 178 | verify_certs: {type: boolean} 179 | es_username: {type: string} 180 | es_password: {type: string} 181 | use_strftime_index: {type: boolean} 182 | 183 | # Optional Settings 184 | import: {type: string} 185 | aggregation: *timeframe 186 | realert: *timeframe 187 | exponential_realert: *timeframe 188 | 189 | buffer_time: *timeframe 190 | query_delay: *timeframe 191 | 
max_query_size: {type: integer} 192 | max_scrolling: {type: integer} 193 | 194 | owner: {type: string} 195 | priority: {type: integer} 196 | 197 | filter : 198 | type: [array, object] 199 | items: *filter 200 | additionalProperties: false 201 | properties: 202 | download_dashboard: {type: string} 203 | 204 | include: {type: array, items: {type: string}} 205 | top_count_keys: {type: array, items: {type: string}} 206 | top_count_number: {type: integer} 207 | raw_count_keys: {type: boolean} 208 | generate_kibana_link: {type: boolean} 209 | kibana_dashboard: {type: string} 210 | use_kibana_dashboard: {type: string} 211 | use_local_time: {type: boolean} 212 | match_enhancements: {type: array, items: {type: string}} 213 | query_key: *arrayOfString 214 | replace_dots_in_field_names: {type: boolean} 215 | scan_entire_timeframe: {type: boolean} 216 | 217 | ### Kibana Discover App Link 218 | generate_kibana_discover_url: {type: boolean} 219 | kibana_discover_app_url: {type: string, format: uri} 220 | kibana_discover_version: {type: string, enum: ['7.3', '7.2', '7.1', '7.0', '6.8', '6.7', '6.6', '6.5', '6.4', '6.3', '6.2', '6.1', '6.0', '5.6']} 221 | kibana_discover_index_pattern_id: {type: string, minLength: 1} 222 | kibana_discover_columns: {type: array, items: {type: string, minLength: 1}, minItems: 1} 223 | kibana_discover_from_timedelta: *timedelta 224 | kibana_discover_to_timedelta: *timedelta 225 | 226 | # Alert Content 227 | alert_text: {type: string} # Python format string 228 | alert_text_args: {type: array, items: {type: string}} 229 | alert_text_kw: {type: object} 230 | alert_text_type: {enum: [alert_text_only, exclude_fields, aggregation_summary_only]} 231 | alert_missing_value: {type: string} 232 | timestamp_field: {type: string} 233 | field: {} 234 | 235 | ### Commands 236 | command: *arrayOfString 237 | pipe_match_json: {type: boolean} 238 | fail_on_non_zero_exit: {type: boolean} 239 | 240 | ### Email 241 | email: *arrayOfString 242 | email_reply_to: {type: string} 243 | notify_email: *arrayOfString # if rule is slow or erroring, send to this email 244 | smtp_host: {type: string} 245 | from_addr: {type: string} 246 | 247 | ### JIRA 248 | jira_server: {type: string} 249 | jira_project: {type: string} 250 | jira_issuetype: {type: string} 251 | jira_account_file: {type: string} # a Yaml file that includes the keys {user:, password:} 252 | 253 | jira_assignee: {type: string} 254 | jira_component: *arrayOfString 255 | jira_components: *arrayOfString 256 | jira_label: *arrayOfString 257 | jira_labels: *arrayOfString 258 | jira_bump_tickets: {type: boolean} 259 | jira_bump_in_statuses: *arrayOfString 260 | jira_bump_not_in_statuses: *arrayOfString 261 | jira_max_age: {type: number} 262 | jira_watchers: *arrayOfString 263 | 264 | ### HipChat 265 | hipchat_auth_token: {type: string} 266 | hipchat_room_id: {type: [string, integer]} 267 | hipchat_domain: {type: string} 268 | hipchat_ignore_ssl_errors: {type: boolean} 269 | hipchat_notify: {type: boolean} 270 | hipchat_from: {type: string} 271 | hipchat_mentions: {type: array, items: {type: string}} 272 | 273 | ### Stride 274 | stride_access_token: {type: string} 275 | stride_cloud_id: {type: string} 276 | stride_conversation_id: {type: string} 277 | stride_ignore_ssl_errors: {type: boolean} 278 | 279 | ### Slack 280 | slack_webhook_url: *arrayOfString 281 | slack_username_override: {type: string} 282 | slack_emoji_override: {type: string} 283 | slack_icon_url_override: {type: string} 284 | slack_msg_color: {enum: [good, warning, danger]} 285 | 
slack_parse_override: {enum: [none, full]} 286 | slack_text_string: {type: string} 287 | slack_ignore_ssl_errors: {type: boolean} 288 | slack_ca_certs: {type: string} 289 | slack_attach_kibana_discover_url: {type: boolean} 290 | slack_kibana_discover_color: {type: string} 291 | slack_kibana_discover_title: {type: string} 292 | 293 | ### Mattermost 294 | mattermost_webhook_url: *arrayOfString 295 | mattermost_proxy: {type: string} 296 | mattermost_ignore_ssl_errors: {type: boolean} 297 | mattermost_username_override: {type: string} 298 | mattermost_icon_url_override: {type: string} 299 | mattermost_channel_override: {type: string} 300 | mattermost_msg_color: {enum: [good, warning, danger]} 301 | mattermost_msg_pretext: {type: string} 302 | mattermost_msg_fields: *mattermostField 303 | 304 | ## Opsgenie 305 | opsgenie_details: 306 | type: object 307 | minProperties: 1 308 | patternProperties: 309 | "^.+$": 310 | oneOf: 311 | - type: string 312 | - type: object 313 | additionalProperties: false 314 | required: [field] 315 | properties: 316 | field: {type: string, minLength: 1} 317 | 318 | ### PagerDuty 319 | pagerduty_service_key: {type: string} 320 | pagerduty_client_name: {type: string} 321 | pagerduty_event_type: {enum: [none, trigger, resolve, acknowledge]} 322 | 323 | ### PagerTree 324 | pagertree_integration_url: {type: string} 325 | 326 | 327 | ### Exotel 328 | exotel_account_sid: {type: string} 329 | exotel_auth_token: {type: string} 330 | exotel_to_number: {type: string} 331 | exotel_from_number: {type: string} 332 | 333 | ### Twilio 334 | twilio_account_sid: {type: string} 335 | twilio_auth_token: {type: string} 336 | twilio_to_number: {type: string} 337 | twilio_from_number: {type: string} 338 | 339 | ### VictorOps 340 | victorops_api_key: {type: string} 341 | victorops_routing_key: {type: string} 342 | victorops_message_type: {enum: [INFO, WARNING, ACKNOWLEDGEMENT, CRITICAL, RECOVERY]} 343 | victorops_entity_id: {type: string} 344 | victorops_entity_display_name: {type: string} 345 | 346 | ### Telegram 347 | telegram_bot_token: {type: string} 348 | telegram_room_id: {type: string} 349 | telegram_api_url: {type: string} 350 | 351 | ### Gitter 352 | gitter_webhook_url: {type: string} 353 | gitter_proxy: {type: string} 354 | gitter_msg_level: {enum: [info, error]} 355 | 356 | ### Alerta 357 | alerta_api_url: {type: string} 358 | alerta_api_key: {type: string} 359 | alerta_severity: {enum: [unknown, security, debug, informational, ok, normal, cleared, indeterminate, warning, minor, major, critical]} 360 | alerta_resource: {type: string} # Python format string 361 | alerta_environment: {type: string} # Python format string 362 | alerta_origin: {type: string} # Python format string 363 | alerta_group: {type: string} # Python format string 364 | alerta_service: {type: array, items: {type: string}} # Python format string 366 | alerta_correlate: {type: array, items: {type: string}} # Python format string 367 | alerta_tags: {type: array, items: {type: string}} # Python format string 368 | alerta_event: {type: string} # Python format string 369 | alerta_customer: {type: string} 370 | alerta_text: {type: string} # Python format string 371 | alerta_type: {type: string} 372 | alerta_value: {type: string} # Python format string 373 | alerta_attributes_keys: {type: array, items: {type: string}} 374 | alerta_attributes_values: {type: array, items: {type: string}} # Python format string
alerta_new_style_string_format: {type: boolean} 376 | 377 | 378 | ### Simple 379 | simple_webhook_url: *arrayOfString 380 | simple_proxy: {type: string} 381 | 382 | ### LineNotify 383 | linenotify_access_token: {type: string} 384 | 385 | ### Zabbix 386 | zbx_sender_host: {type: string} 387 | zbx_sender_port: {type: integer} 388 | zbx_host: {type: string} 389 | zbx_key: {type: string}  # was zbx_item, which nothing reads; zabbix.py requires and reads zbx_key 390 | -------------------------------------------------------------------------------- /elastalert/zabbix.py: -------------------------------------------------------------------------------- 1 | from .alerts import Alerter  # , BasicMatchString -- relative import, matching the package's other modules 2 | import logging 3 | from pyzabbix.api import ZabbixAPI 4 | from pyzabbix import ZabbixSender, ZabbixMetric 5 | from datetime import datetime 6 | 7 | 8 | class ZabbixClient(ZabbixAPI): 9 | send_aggregated_metrics = False  # checked in send_metric() but previously never initialized; default to one-at-a-time sending 10 | def __init__(self, url='http://localhost', use_authenticate=False, user='Admin', password='zabbix', sender_host='localhost', 11 | sender_port=10051): 12 | self.url = url 13 | self.use_authenticate = use_authenticate 14 | self.sender_host = sender_host 15 | self.sender_port = sender_port 16 | self.metrics_chunk_size = 200 17 | self.aggregated_metrics = [] 18 | self.logger = logging.getLogger(self.__class__.__name__) 19 | super(ZabbixClient, self).__init__(url=self.url, use_authenticate=self.use_authenticate, user=user, password=password) 20 | 21 | def send_metric(self, hostname, key, data): 22 | zm = ZabbixMetric(hostname, key, data) 23 | if self.send_aggregated_metrics: 24 | 25 | self.aggregated_metrics.append(zm) 26 | if len(self.aggregated_metrics) > self.metrics_chunk_size: 27 | self.logger.info("Sending: %s metrics" % (len(self.aggregated_metrics))) 28 | try: 29 | ZabbixSender(zabbix_server=self.sender_host, zabbix_port=self.sender_port).send(self.aggregated_metrics) 30 | self.aggregated_metrics = [] 31 | except Exception as e: 32 | self.logger.exception(e) 33 | pass 34 | else: 35 | try: 36 | ZabbixSender(zabbix_server=self.sender_host, zabbix_port=self.sender_port).send(zm) 37 | except Exception as e: 38 | self.logger.exception(e) 39 | pass 40 | 41 | 42 | class ZabbixAlerter(Alerter): 43 | 44 | # By setting required_options to a set of strings 45 | # You can ensure that the rule config file specifies all 46 | # of the options. Otherwise, ElastAlert will throw an exception 47 | # when trying to load the rule. 48 | required_options = frozenset(['zbx_sender_host', 'zbx_sender_port', 'zbx_host', 'zbx_key']) 49 | 50 | def __init__(self, *args): 51 | super(ZabbixAlerter, self).__init__(*args) 52 | 53 | self.zbx_sender_host = self.rule.get('zbx_sender_host', 'localhost') 54 | self.zbx_sender_port = self.rule.get('zbx_sender_port', 10051) 55 | self.zbx_host = self.rule.get('zbx_host') 56 | self.zbx_key = self.rule.get('zbx_key') 57 | 58 | # Alert is called 59 | def alert(self, matches): 60 | 61 | # Matches is a list of match dictionaries.
62 | # It contains more than one match when the alert has 63 | # the aggregation option set 64 | zm = [] 65 | for match in matches: 66 | ts_epoch = int(datetime.strptime(match['@timestamp'], "%Y-%m-%dT%H:%M:%S.%fZ").strftime('%s')) 67 | zm.append(ZabbixMetric(host=self.zbx_host, key=self.zbx_key, value=1, clock=ts_epoch)) 68 | 69 | ZabbixSender(zabbix_server=self.zbx_sender_host, zabbix_port=self.zbx_sender_port).send(zm) 70 | 71 | # get_info is called after an alert is sent to get data that is written back 72 | # to Elasticsearch in the field "alert_info" 73 | # It should return a dict of information relevant to what the alert does 74 | def get_info(self): 75 | return {'type': 'zabbix Alerter'} 76 | -------------------------------------------------------------------------------- /example_rules/example_cardinality.yaml: -------------------------------------------------------------------------------- 1 | # Alert when the cardinality (number of unique values) of a field crosses a threshold 2 | 3 | # (Optional) 4 | # Elasticsearch host 5 | # es_host: elasticsearch.example.com 6 | 7 | # (Optional) 8 | # Elasticsearch port 9 | # es_port: 14900 10 | 11 | # (Required) 12 | # Index to search, wildcard supported 13 | index: logstash-* 14 | 15 | # (Optional) Connect with SSL to Elasticsearch 16 | #use_ssl: True 17 | 18 | # (Optional) basic-auth username and password for Elasticsearch 19 | #es_username: someusername 20 | #es_password: somepassword 21 | 22 | # (Required) 23 | # Rule name, must be unique 24 | name: Example cardinality rule 25 | 26 | # (Required) 27 | # Type of alert. 28 | # the cardinality rule type alerts when the number of unique values for cardinality_field goes above max_cardinality or below min_cardinality within the timeframe 29 | type: cardinality 30 | 31 | # (Required, cardinality specific) 32 | # Count the number of unique values for this field 33 | cardinality_field: "Hostname" 34 | 35 | # (Required, cardinality specific) 36 | # Alert when there are fewer than 15 unique hostnames 37 | min_cardinality: 15 38 | 39 | # (Required, cardinality specific) 40 | # The cardinality is defined as the number of unique values for the most recent 4 hours 41 | timeframe: 42 | hours: 4 43 | 44 | # (Required) 45 | # A list of Elasticsearch filters used to find events 46 | # These filters are joined with AND and nested in a filtered query 47 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 48 | filter: 49 | - term: 50 | status: "active" 51 | 52 | # (Required) 53 | # The alert is used when a match is found 54 | alert: 55 | - "email" 56 | 57 | # (required, email specific) 58 | # a list of email addresses to send alerts to 59 | email: 60 | - "elastalert@example.com" 61 | -------------------------------------------------------------------------------- /example_rules/example_change.yaml: -------------------------------------------------------------------------------- 1 | # Alert when some field changes between documents 2 | # This rule would alert on documents similar to the following: 3 | # {'username': 'bob', 'country_name': 'USA', '@timestamp': '2014-10-15T00:00:00'} 4 | # {'username': 'bob', 'country_name': 'Russia', '@timestamp': '2014-10-15T05:00:00'} 5 | # Because the user (query_key) bob logged in from different countries (compare_key) in the same day (timeframe) 6 | 7 | # (Optional) 8 | # Elasticsearch host 9 | # es_host: elasticsearch.example.com 10 | 11 | # (Optional) 12 | # Elasticsearch port 13 | # es_port: 14900 14 | 15 | # (Optional) Connect with SSL to Elasticsearch 16 | #use_ssl: True 17 | 18 | # (Optional) basic-auth username and password for
Elasticsearch 19 | #es_username: someusername 20 | #es_password: somepassword 21 | 22 | # (Required) 23 | # Rule name, must be unique 24 | name: New country login 25 | 26 | # (Required) 27 | # Type of alert. 28 | # the change rule will alert when a certain field changes in two documents within a timeframe 29 | type: change 30 | 31 | # (Required) 32 | # Index to search, wildcard supported 33 | index: logstash-* 34 | 35 | # (Required, change specific) 36 | # The field to look for changes in 37 | compare_key: country_name 38 | 39 | # (Required, change specific) 40 | # Ignore documents without the compare_key (country_name) field 41 | ignore_null: true 42 | 43 | # (Required, change specific) 44 | # The change must occur in two documents with the same query_key 45 | query_key: username 46 | 47 | # (Required, change specific) 48 | # The value of compare_key must change in two events that are less than timeframe apart to trigger an alert 49 | timeframe: 50 | days: 1 51 | 52 | # (Required) 53 | # A list of Elasticsearch filters used to find events 54 | # These filters are joined with AND and nested in a filtered query 55 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 56 | filter: 57 | - query: 58 | query_string: 59 | query: "document_type: login" 60 | 61 | # (Required) 62 | # The alert is used when a match is found 63 | alert: 64 | - "email" 65 | 66 | # (required, email specific) 67 | # a list of email addresses to send alerts to 68 | email: 69 | - "elastalert@example.com" 70 | -------------------------------------------------------------------------------- /example_rules/example_frequency.yaml: -------------------------------------------------------------------------------- 1 | # Alert when the rate of events exceeds a threshold 2 | 3 | # (Optional) 4 | # Elasticsearch host 5 | # es_host: elasticsearch.example.com 6 | 7 | # (Optional) 8 | # Elasticsearch port 9 | # es_port: 14900 10 | 11 | # (Optional) Connect with SSL to Elasticsearch 12 | #use_ssl: True 13 | 14 | # (Optional) basic-auth username and password for Elasticsearch 15 | #es_username: someusername 16 | #es_password: somepassword 17 | 18 | # (Required) 19 | # Rule name, must be unique 20 | name: Example frequency rule 21 | 22 | # (Required) 23 | # Type of alert.
24 | # the frequency rule type alerts when num_events events occur within timeframe time 25 | type: frequency 26 | 27 | # (Required) 28 | # Index to search, wildcard supported 29 | index: logstash-* 30 | 31 | # (Required, frequency specific) 32 | # Alert when this many documents matching the query occur within a timeframe 33 | num_events: 50 34 | 35 | # (Required, frequency specific) 36 | # num_events must occur within this amount of time to trigger an alert 37 | timeframe: 38 | hours: 4 39 | 40 | # (Required) 41 | # A list of Elasticsearch filters used to find events 42 | # These filters are joined with AND and nested in a filtered query 43 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 44 | filter: 45 | - term: 46 | some_field: "some_value" 47 | 48 | # (Required) 49 | # The alert is used when a match is found 50 | alert: 51 | - "email" 52 | 53 | # (required, email specific) 54 | # a list of email addresses to send alerts to 55 | email: 56 | - "elastalert@example.com" 57 | -------------------------------------------------------------------------------- /example_rules/example_new_term.yaml: -------------------------------------------------------------------------------- 1 | # Alert when a login event is detected for user "admin" from a never before seen IP 2 | # In this example, "login" logs contain which user has logged in from what IP 3 | 4 | # (Optional) 5 | # Elasticsearch host 6 | # es_host: elasticsearch.example.com 7 | 8 | # (Optional) 9 | # Elasticsearch port 10 | # es_port: 14900 11 | 12 | # (Optional) Connect with SSL to Elasticsearch 13 | #use_ssl: True 14 | 15 | # (Optional) basic-auth username and password for Elasticsearch 16 | #es_username: someusername 17 | #es_password: somepassword 18 | 19 | # (Required) 20 | # Rule name, must be unique 21 | name: Example new term rule 22 | 23 | # (Required) 24 | # Type of alert.
25 | # the new_term rule type alerts when a never before seen value appears in the monitored fields 26 | type: new_term 27 | 28 | # (Required) 29 | # Index to search, wildcard supported 30 | index: logstash-* 31 | 32 | # (Required, new_term specific) 33 | # Monitor the field ip_address 34 | fields: 35 | - "ip_address" 36 | 37 | # (Optional, new_term specific) 38 | # This means that we will query 90 days worth of data when ElastAlert starts to find which values of ip_address already exist 39 | # If they existed in the last 90 days, no alerts will be triggered for them when they appear 40 | terms_window_size: 41 | days: 90 42 | 43 | # (Required) 44 | # A list of Elasticsearch filters used to find events 45 | # These filters are joined with AND and nested in a filtered query 46 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 47 | # We are filtering for only "login_event" type documents with username "admin" 48 | filter: 49 | - term: 50 | _type: "login_event" 51 | - term: 52 | username: admin 53 | 54 | # (Required) 55 | # The alert is used when a match is found 56 | alert: 57 | - "email" 58 | 59 | # (required, email specific) 60 | # a list of email addresses to send alerts to 61 | email: 62 | - "elastalert@example.com" 63 | -------------------------------------------------------------------------------- /example_rules/example_opsgenie_frequency.yaml: -------------------------------------------------------------------------------- 1 | # Alert when the rate of events exceeds a threshold 2 | 3 | # (Optional) 4 | # Elasticsearch host 5 | #es_host: localhost 6 | 7 | # (Optional) 8 | # Elasticsearch port 9 | #es_port: 9200 10 | 11 | # (Required) 12 | # OpsGenie credentials 13 | opsgenie_key: ogkey 14 | 15 | # (Optional) 16 | # OpsGenie user account that the alert will show as created by 17 | #opsgenie_account: neh 18 | 19 | # (Optional) 20 | # OpsGenie recipients of the alert 21 | #opsgenie_recipients: 22 | # - "neh" 23 | 24 | # (Optional) 25 | # OpsGenie recipients with args 26 | # opsgenie_recipients: 27 | # - {recipient} 28 | # opsgenie_recipients_args: 29 | # team_prefix:'user.email' 30 | 31 | # (Optional) 32 | # OpsGenie teams to notify 33 | #opsgenie_teams: 34 | # - "Infrastructure" 35 | 36 | # (Optional) 37 | # OpsGenie teams with args 38 | # opsgenie_teams: 39 | # - {team_prefix}-Team 40 | # opsgenie_teams_args: 41 | # team_prefix:'team' 42 | 43 | # (Optional) 44 | # OpsGenie alert tags 45 | opsgenie_tags: 46 | - "Production" 47 | 48 | # (Optional) Connect with SSL to Elasticsearch 49 | #use_ssl: True 50 | 51 | # (Optional) basic-auth username and password for Elasticsearch 52 | #es_username: someusername 53 | #es_password: somepassword 54 | 55 | # (Required) 56 | # Rule name, must be unique 57 | name: opsgenie_rule 58 | 59 | # (Required) 60 | # Type of alert.
61 | # the frequency rule type alerts when num_events events occur within timeframe time 62 | type: frequency 63 | 64 | # (Required) 65 | # Index to search, wildcard supported 66 | index: logstash-* 67 | 68 | #doc_type: "golog" 69 | 70 | # (Required, frequency specific) 71 | # Alert when this many documents matching the query occur within a timeframe 72 | num_events: 50 73 | 74 | # (Required, frequency specific) 75 | # num_events must occur within this amount of time to trigger an alert 76 | timeframe: 77 | hours: 2 78 | 79 | # (Required) 80 | # A list of Elasticsearch filters used to find events 81 | # These filters are joined with AND and nested in a filtered query 82 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 83 | filter: 84 | - query: 85 | query_string: 86 | query: "@message: *hihi*" 87 | 88 | # (Required) 89 | # The alert is used when a match is found 90 | alert: 91 | - "opsgenie" 92 | -------------------------------------------------------------------------------- /example_rules/example_percentage_match.yaml: -------------------------------------------------------------------------------- 1 | name: Example Percentage Match 2 | type: percentage_match 3 | 4 | #es_host: localhost 5 | #es_port: 9200 6 | 7 | index: logstash-http-request-* 8 | description: "95% of all http requests should be successful" 9 | 10 | filter: 11 | - term: 12 | _type: http_request 13 | 14 | buffer_time: 15 | minutes: 5 16 | 17 | query_key: Hostname.keyword 18 | doc_type: http_request 19 | 20 | match_bucket_filter: 21 | - terms: 22 | ResponseStatus: [200] 23 | 24 | min_percentage: 95 25 | #max_percentage: 60 26 | 27 | #bucket_interval: 28 | # minutes: 1 29 | 30 | #sync_bucket_interval: true 31 | #allow_buffer_time_overlap: true 32 | #use_run_every_query_size: true 33 | 34 | # (Required) 35 | # The alert is used when a match is found 36 | alert: 37 | - "debug" 38 | 39 | -------------------------------------------------------------------------------- /example_rules/example_single_metric_agg.yaml: -------------------------------------------------------------------------------- 1 | name: Metricbeat CPU Spike Rule 2 | type: metric_aggregation 3 | 4 | #es_host: localhost 5 | #es_port: 9200 6 | 7 | index: metricbeat-* 8 | 9 | buffer_time: 10 | hours: 1 11 | 12 | metric_agg_key: system.cpu.user.pct 13 | metric_agg_type: avg 14 | query_key: beat.hostname 15 | doc_type: metricsets 16 | 17 | bucket_interval: 18 | minutes: 5 19 | 20 | sync_bucket_interval: true 21 | #allow_buffer_time_overlap: true 22 | #use_run_every_query_size: true 23 | 24 | min_threshold: 0.1 25 | max_threshold: 0.8 26 | 27 | filter: 28 | - term: 29 | metricset.name: cpu 30 | 31 | # (Required) 32 | # The alert is used when a match is found 33 | alert: 34 | - "debug" 35 | 36 | -------------------------------------------------------------------------------- /example_rules/example_spike.yaml: -------------------------------------------------------------------------------- 1 | # Alert when there is a sudden spike in the volume of events 2 | 3 | # (Optional) 4 | # Elasticsearch host 5 | # es_host: elasticsearch.example.com 6 | 7 | # (Optional) 8 | # Elasticsearch port 9 | # es_port: 14900 10 | 11 | # (Optional) Connect with SSL to Elasticsearch 12 | #use_ssl: True 13 | 14 | # (Optional) basic-auth username and password for Elasticsearch 15 | #es_username: someusername 16 | #es_password: somepassword 17 | 18 | # (Required) 19 | # Rule name, must be unique 20 | name: Event spike 21 | 22 | # (Required) 23 |
# Type of alert. 24 | # the spike rule type compares the number of events within two sliding windows to each other 25 | type: spike 26 | 27 | # (Required) 28 | # Index to search, wildcard supported 29 | index: logstash-* 30 | 31 | # (Required one of _cur or _ref, spike specific) 32 | # The minimum number of events that will trigger an alert 33 | # For example, if there are only 2 events between 12:00 and 2:00, and 20 between 2:00 and 4:00, 34 | # then _ref is 2 and _cur is 20, and the alert WILL fire because 20 is greater than threshold_cur and (_ref * spike_height) 35 | threshold_cur: 5 36 | #threshold_ref: 5 37 | 38 | # (Required, spike specific) 39 | # The size of the window used to determine average event frequency 40 | # We use two sliding windows each of size timeframe 41 | # To measure the 'reference' rate and the current rate 42 | timeframe: 43 | hours: 2 44 | 45 | # (Required, spike specific) 46 | # The spike rule matches when the current window contains spike_height times more 47 | # events than the reference window 48 | spike_height: 3 49 | 50 | # (Required, spike specific) 51 | # The direction of the spike 52 | # 'up' matches only spikes, 'down' matches only troughs 53 | # 'both' matches both spikes and troughs 54 | spike_type: "up" 55 | 56 | # (Required) 57 | # A list of Elasticsearch filters used to find events 58 | # These filters are joined with AND and nested in a filtered query 59 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 60 | filter: 61 | - query: 62 | query_string: 63 | query: "field: value" 64 | - type: 65 | value: "some_doc_type" 66 | 67 | # (Required) 68 | # The alert is used when a match is found 69 | alert: 70 | - "email" 71 | 72 | # (Required, email specific) 73 | # a list of email addresses to send alerts to 74 | email: 75 | - "elastalert@example.com" 76 | -------------------------------------------------------------------------------- /example_rules/example_spike_single_metric_agg.yaml: -------------------------------------------------------------------------------- 1 | name: Metricbeat Average CPU Spike Rule 2 | type: spike_aggregation 3 | 4 | #es_host: localhost 5 | #es_port: 9200 6 | 7 | index: metricbeat-* 8 | 9 | timeframe: 10 | hours: 4 11 | 12 | buffer_time: 13 | hours: 1 14 | 15 | metric_agg_key: system.cpu.user.pct 16 | metric_agg_type: avg 17 | query_key: beat.hostname 18 | doc_type: metricsets 19 | 20 | #allow_buffer_time_overlap: true 21 | #use_run_every_query_size: true 22 | 23 | # (Required one of _cur or _ref, spike specific) 24 | # The minimum value of the aggregation that will trigger the alert 25 | # For example, if we're tracking the average for a metric whose average is 0.4 between 12:00 and 2:00 26 | # and 0.95 between 2:00 and 4:00 with spike_height set to 2 and threshold_cur set to 0.9: 27 | # _ref is 0.4 and _cur is 0.95, and the alert WILL fire 28 | # because 0.95 is greater than threshold_cur (0.9) and (_ref * spike_height (.4 * 2)) 29 | threshold_cur: 0.9 30 | 31 | # (Optional, min_doc_count) 32 | # for rules using a per-term aggregation via query_key, the minimum number of events 33 | # over the past buffer_time needed to update the spike tracker 34 | min_doc_count: 5 35 | 36 | # (Required, spike specific) 37 | # The spike aggregation rule matches when the current window's aggregated value is spike_height times higher 38 | # than the reference window's 39 | spike_height: 2 40 | 41 | # (Required, spike specific) 42 | # The direction of the spike 43 | # 'up' matches only spikes, 
'down' matches only troughs 44 | # 'both' matches both spikes and troughs 45 | spike_type: "up" 46 | 47 | filter: 48 | - term: 49 | metricset.name: cpu 50 | 51 | # (Required) 52 | # The alert is used when a match is found 53 | alert: 54 | - "debug" 55 | 56 | -------------------------------------------------------------------------------- /example_rules/jira_acct.txt: -------------------------------------------------------------------------------- 1 | # Example jira_account information file 2 | # You should make sure that this file is not globally readable or version controlled! (Except for this example) 3 | 4 | # Jira username 5 | user: elastalert-jira 6 | # Jira password 7 | password: p455w0rd 8 | -------------------------------------------------------------------------------- /example_rules/ssh-repeat-offender.yaml: -------------------------------------------------------------------------------- 1 | # Rule name, must be unique 2 | name: SSH abuse - repeat offender 3 | 4 | # Alert on x events in y seconds 5 | type: frequency 6 | 7 | # Alert when this many documents matching the query occur within a timeframe 8 | num_events: 2 9 | 10 | # num_events must occur within this amount of time to trigger an alert 11 | timeframe: 12 | weeks: 1 13 | 14 | # A list of Elasticsearch filters used to find events 15 | # These filters are joined with AND and nested in a filtered query 16 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 17 | filter: 18 | - term: 19 | rule_name: "SSH abuse" 20 | 21 | index: elastalert 22 | 23 | # When the attacker continues, send a new alert after x minutes 24 | realert: 25 | weeks: 4 26 | 27 | query_key: 28 | - match_body.source.ip 29 | 30 | include: 31 | - match_body.host.hostname 32 | - match_body.user.name 33 | - match_body.source.ip 34 | 35 | alert_subject: "SSH abuse (repeat offender) on <{}> | <{}|Show Dashboard>" 36 | alert_subject_args: 37 | - match_body.host.hostname 38 | - kibana_link 39 | 40 | alert_text: |- 41 | A repeat offender has been active on {}. 
42 | 43 | User: {} 44 | IP: {} 45 | alert_text_args: 46 | - match_body.host.hostname 47 | - match_body.user.name 48 | - match_body.source.ip 49 | 50 | # The alert is used when a match is found 51 | alert: 52 | - slack 53 | 54 | slack_webhook_url: "https://hooks.slack.com/services/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" 55 | slack_username_override: "ElastAlert" 56 | 57 | # Alert body only contains a title and text 58 | alert_text_type: alert_text_only 59 | 60 | # Link to BitSensor Kibana Dashboard 61 | use_kibana4_dashboard: "https://dev.securely.ai/app/kibana#/dashboard/37739d80-a95c-11e9-b5ba-33a34ca252fb" 62 | -------------------------------------------------------------------------------- /example_rules/ssh.yaml: -------------------------------------------------------------------------------- 1 | # Rule name, must be unique 2 | name: SSH abuse (ElastAlert 3.0.1) - 2 3 | 4 | # Alert on x events in y seconds 5 | type: frequency 6 | 7 | # Alert when this many documents matching the query occur within a timeframe 8 | num_events: 20 9 | 10 | # num_events must occur within this amount of time to trigger an alert 11 | timeframe: 12 | minutes: 60 13 | 14 | # A list of Elasticsearch filters used to find events 15 | # These filters are joined with AND and nested in a filtered query 16 | # For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html 17 | filter: 18 | - query: 19 | query_string: 20 | query: "event.type:authentication_failure" 21 | 22 | index: auditbeat-* 23 | 24 | # When the attacker continues, send a new alert after x minutes 25 | realert: 26 | minutes: 1 27 | 28 | query_key: 29 | - source.ip 30 | 31 | include: 32 | - host.hostname 33 | - user.name 34 | - source.ip 35 | 36 | include_match_in_root: true 37 | 38 | alert_subject: "SSH abuse on <{}> | <{}|Show Dashboard>" 39 | alert_subject_args: 40 | - host.hostname 41 | - kibana_link 42 | 43 | alert_text: |- 44 | An attack on {} is detected. 45 | The attacker looks like: 46 | User: {} 47 | IP: {} 48 | alert_text_args: 49 | - host.hostname 50 | - user.name 51 | - source.ip 52 | 53 | # The alert is used when a match is found 54 | alert: 55 | - debug 56 | 57 | slack_webhook_url: "https://hooks.slack.com/services/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" 58 | slack_username_override: "ElastAlert" 59 | 60 | # Alert body only contains a title and text 61 | alert_text_type: alert_text_only 62 | 63 | # Link to BitSensor Kibana Dashboard 64 | use_kibana4_dashboard: "https://dev.securely.ai/app/kibana#/dashboard/37739d80-a95c-11e9-b5ba-33a34ca252fb" 65 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | elasticsearch: mark a test as using elasticsearch. 
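The elasticsearch marker registered in pytest.ini above is the hook that tests/conftest.py (shown later in this listing) keys on to separate unit tests from integration tests. As a minimal illustrative sketch, a marked test would look like the following; the test name and body are hypothetical, and only the marker and the --runelasticsearch flag come from this repository:

# -*- coding: utf-8 -*-
import pytest


@pytest.mark.elasticsearch
def test_needs_live_cluster():
    # Hypothetical body: pytest_collection_modifyitems in tests/conftest.py
    # skips this test unless pytest is invoked with --runelasticsearch,
    # and conversely skips every unmarked test when that flag is given.
    assert True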
4 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | coverage==4.5.4 3 | flake8 4 | pre-commit 5 | pylint<1.4 6 | pytest<3.3.0 7 | setuptools 8 | sphinx_rtd_theme 9 | tox<2.0 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | apscheduler>=3.3.0 2 | aws-requests-auth>=0.3.0 3 | blist>=1.3.6 4 | boto3>=1.4.4 5 | cffi>=1.11.5 6 | configparser>=3.5.0 7 | croniter>=0.3.16 8 | elasticsearch>=7.0.0 9 | envparse>=0.2.0 10 | exotel>=0.1.3 11 | jira>=1.0.10,<1.0.15 12 | jsonschema>=3.0.2 13 | mock>=2.0.0 14 | prison>=0.1.2 15 | py-zabbix==1.1.3 16 | PyStaticConfiguration>=0.10.3 17 | python-dateutil>=2.6.0,<2.7.0 18 | PyYAML>=5.1 19 | requests>=2.0.0 20 | stomp.py>=4.1.17 21 | texttable>=0.8.8 22 | twilio==6.0.0 23 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = .git,__pycache__,.tox,docs,virtualenv_run,modules,venv,env 3 | max-line-length = 140 4 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | 4 | from setuptools import find_packages 5 | from setuptools import setup 6 | 7 | 8 | base_dir = os.path.dirname(__file__) 9 | setup( 10 | name='elastalert', 11 | version='0.2.4', 12 | description='Runs custom filters on Elasticsearch and alerts on matches', 13 | author='Quentin Long', 14 | author_email='qlo@yelp.com', 15 | setup_requires='setuptools', 16 | license='Copyright 2014 Yelp', 17 | classifiers=[ 18 | 'Programming Language :: Python :: 3.6', 19 | 'License :: OSI Approved :: Apache Software License', 20 | 'Operating System :: OS Independent', 21 | ], 22 | entry_points={ 23 | 'console_scripts': ['elastalert-create-index=elastalert.create_index:main', 24 | 'elastalert-test-rule=elastalert.test_rule:main', 25 | 'elastalert-rule-from-kibana=elastalert.rule_from_kibana:main', 26 | 'elastalert=elastalert.elastalert:main']}, 27 | packages=find_packages(), 28 | package_data={'elastalert': ['schema.yaml', 'es_mappings/**/*.json']}, 29 | install_requires=[ 30 | 'apscheduler>=3.3.0', 31 | 'aws-requests-auth>=0.3.0', 32 | 'blist>=1.3.6', 33 | 'boto3>=1.4.4', 34 | 'configparser>=3.5.0', 35 | 'croniter>=0.3.16', 36 | 'elasticsearch==7.0.0', 37 | 'envparse>=0.2.0', 38 | 'exotel>=0.1.3', 39 | 'jira>=2.0.0', 40 | 'jsonschema>=3.0.2', 41 | 'mock>=2.0.0', 42 | 'prison>=0.1.2', 43 | 'PyStaticConfiguration>=0.10.3', 44 | 'python-dateutil>=2.6.0,<2.7.0', 45 | 'PyYAML>=3.12', 46 | 'requests>=2.10.0', 47 | 'stomp.py>=4.1.17', 48 | 'texttable>=0.8.8', 49 | 'twilio>=6.0.0,<6.1', 50 | 'cffi>=1.11.5' 51 | ] 52 | ) 53 | -------------------------------------------------------------------------------- /supervisord.conf.example: -------------------------------------------------------------------------------- 1 | [unix_http_server] 2 | file=/var/run/elastalert_supervisor.sock 3 | 4 | [supervisord] 5 | logfile=/var/log/elastalert_supervisord.log 6 | logfile_maxbytes=1MB 7 | logfile_backups=2 8 | loglevel=debug 9 | nodaemon=false 10 | directory=%(here)s 11 | 12 | [rpcinterface:supervisor] 13 | 
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 14 | 15 | [supervisorctl] 16 | serverurl=unix:///var/run/elastalert_supervisor.sock 17 | 18 | [program:elastalert] 19 | # running globally 20 | command = 21 | python elastalert.py 22 | --verbose 23 | # (alternative) using virtualenv 24 | # command=/path/to/venv/bin/elastalert --config /path/to/config.yaml --verbose 25 | process_name=elastalert 26 | autorestart=true 27 | startsecs=15 28 | stopsignal=INT 29 | stopasgroup=true 30 | killasgroup=true 31 | stderr_logfile=/var/log/elastalert_stderr.log 32 | stderr_logfile_maxbytes=5MB 33 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yelp/elastalert/e0bbcb5b71e9fdb4a750c3871d365a882ff17b16/tests/__init__.py -------------------------------------------------------------------------------- /tests/auth_test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from elastalert.auth import Auth, RefeshableAWSRequestsAuth 3 | 4 | 5 | def test_auth_none(): 6 | 7 | auth = Auth()( 8 | host='localhost:8080', 9 | username=None, 10 | password=None, 11 | aws_region=None, 12 | profile_name=None 13 | ) 14 | 15 | assert not auth 16 | 17 | 18 | def test_auth_username_password(): 19 | 20 | auth = Auth()( 21 | host='localhost:8080', 22 | username='user', 23 | password='password', 24 | aws_region=None, 25 | profile_name=None 26 | ) 27 | 28 | assert auth == 'user:password' 29 | 30 | 31 | def test_auth_aws_region(): 32 | 33 | auth = Auth()( 34 | host='localhost:8080', 35 | username=None, 36 | password=None, 37 | aws_region='us-east-1', 38 | profile_name=None 39 | ) 40 | 41 | assert type(auth) == RefeshableAWSRequestsAuth 42 | assert auth.aws_region == 'us-east-1' 43 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | import logging 4 | import os 5 | 6 | import mock 7 | import pytest 8 | 9 | import elastalert.elastalert 10 | import elastalert.util 11 | from elastalert.util import dt_to_ts 12 | from elastalert.util import ts_to_dt 13 | 14 | writeback_index = 'wb' 15 | 16 | 17 | def pytest_addoption(parser): 18 | parser.addoption( 19 | "--runelasticsearch", action="store_true", default=False, help="run elasticsearch tests" 20 | ) 21 | 22 | 23 | def pytest_collection_modifyitems(config, items): 24 | if config.getoption("--runelasticsearch"): 25 | # --runelasticsearch given in cli: run elasticsearch tests, skip ordinary unit tests 26 | skip_unit_tests = pytest.mark.skip(reason="not running when --runelasticsearch option is used to run") 27 | for item in items: 28 | if "elasticsearch" not in item.keywords: 29 | item.add_marker(skip_unit_tests) 30 | else: 31 | # skip elasticsearch tests 32 | skip_elasticsearch = pytest.mark.skip(reason="need --runelasticsearch option to run") 33 | for item in items: 34 | if "elasticsearch" in item.keywords: 35 | item.add_marker(skip_elasticsearch) 36 | 37 | 38 | @pytest.fixture(scope='function', autouse=True) 39 | def reset_loggers(): 40 | """Prevent logging handlers from capturing temporary file handles. 
41 | 42 | For example, a test that uses the `capsys` fixture and calls 43 | `logging.exception()` will initialize logging with a default handler that 44 | captures `sys.stderr`. When the test ends, the file handles will be closed 45 | and `sys.stderr` will be returned to its original handle, but the logging 46 | will have a dangling reference to the temporary handle used in the `capsys` 47 | fixture. 48 | 49 | """ 50 | logger = logging.getLogger() 51 | for handler in logger.handlers: 52 | logger.removeHandler(handler) 53 | 54 | 55 | class mock_es_indices_client(object): 56 | def __init__(self): 57 | self.exists = mock.Mock(return_value=True) 58 | 59 | 60 | class mock_es_client(object): 61 | def __init__(self, host='es', port=14900): 62 | self.host = host 63 | self.port = port 64 | self.return_hits = [] 65 | self.search = mock.Mock() 66 | self.deprecated_search = mock.Mock() 67 | self.create = mock.Mock() 68 | self.index = mock.Mock() 69 | self.delete = mock.Mock() 70 | self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '2.0'}}) 71 | self.ping = mock.Mock(return_value=True) 72 | self.indices = mock_es_indices_client() 73 | self.es_version = mock.Mock(return_value='2.0') 74 | self.is_atleastfive = mock.Mock(return_value=False) 75 | self.is_atleastsix = mock.Mock(return_value=False) 76 | self.is_atleastsixtwo = mock.Mock(return_value=False) 77 | self.is_atleastsixsix = mock.Mock(return_value=False) 78 | self.is_atleastseven = mock.Mock(return_value=False) 79 | self.resolve_writeback_index = mock.Mock(return_value=writeback_index) 80 | 81 | 82 | class mock_es_sixsix_client(object): 83 | def __init__(self, host='es', port=14900): 84 | self.host = host 85 | self.port = port 86 | self.return_hits = [] 87 | self.search = mock.Mock() 88 | self.deprecated_search = mock.Mock() 89 | self.create = mock.Mock() 90 | self.index = mock.Mock() 91 | self.delete = mock.Mock() 92 | self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '6.6.0'}}) 93 | self.ping = mock.Mock(return_value=True) 94 | self.indices = mock_es_indices_client() 95 | self.es_version = mock.Mock(return_value='6.6.0') 96 | self.is_atleastfive = mock.Mock(return_value=True) 97 | self.is_atleastsix = mock.Mock(return_value=True) 98 | self.is_atleastsixtwo = mock.Mock(return_value=False) 99 | self.is_atleastsixsix = mock.Mock(return_value=True) 100 | self.is_atleastseven = mock.Mock(return_value=False) 101 | 102 | def writeback_index_side_effect(index, doc_type): 103 | if doc_type == 'silence': 104 | return index + '_silence' 105 | elif doc_type == 'past_elastalert': 106 | return index + '_past' 107 | elif doc_type == 'elastalert_status': 108 | return index + '_status' 109 | elif doc_type == 'elastalert_error': 110 | return index + '_error' 111 | return index 112 | 113 | self.resolve_writeback_index = mock.Mock(side_effect=writeback_index_side_effect) 114 | 115 | 116 | class mock_rule_loader(object): 117 | def __init__(self, conf): 118 | self.base_config = conf 119 | self.load = mock.Mock() 120 | self.get_hashes = mock.Mock() 121 | self.load_configuration = mock.Mock() 122 | 123 | 124 | class mock_ruletype(object): 125 | def __init__(self): 126 | self.add_data = mock.Mock() 127 | self.add_count_data = mock.Mock() 128 | self.add_terms_data = mock.Mock() 129 | self.matches = [] 130 | self.get_match_data = lambda x: x 131 | self.get_match_str = lambda x: "some stuff happened" 132 | self.garbage_collect = mock.Mock() 133 | 134 | 135 | class mock_alert(object): 136 | def 
__init__(self): 137 | self.alert = mock.Mock() 138 | 139 | def get_info(self): 140 | return {'type': 'mock'} 141 | 142 | 143 | @pytest.fixture 144 | def ea(): 145 | rules = [{'es_host': '', 146 | 'es_port': 14900, 147 | 'name': 'anytest', 148 | 'index': 'idx', 149 | 'filter': [], 150 | 'include': ['@timestamp'], 151 | 'aggregation': datetime.timedelta(0), 152 | 'realert': datetime.timedelta(0), 153 | 'processed_hits': {}, 154 | 'timestamp_field': '@timestamp', 155 | 'match_enhancements': [], 156 | 'rule_file': 'blah.yaml', 157 | 'max_query_size': 10000, 158 | 'ts_to_dt': ts_to_dt, 159 | 'dt_to_ts': dt_to_ts, 160 | '_source_enabled': True, 161 | 'run_every': datetime.timedelta(seconds=15)}] 162 | conf = {'rules_folder': 'rules', 163 | 'run_every': datetime.timedelta(minutes=10), 164 | 'buffer_time': datetime.timedelta(minutes=5), 165 | 'alert_time_limit': datetime.timedelta(hours=24), 166 | 'es_host': 'es', 167 | 'es_port': 14900, 168 | 'writeback_index': 'wb', 169 | 'writeback_alias': 'wb_a', 170 | 'rules': rules, 171 | 'max_query_size': 10000, 172 | 'old_query_limit': datetime.timedelta(weeks=1), 173 | 'disable_rules_on_error': False, 174 | 'scroll_keepalive': '30s'} 175 | elastalert.util.elasticsearch_client = mock_es_client 176 | conf['rules_loader'] = mock_rule_loader(conf) 177 | elastalert.elastalert.elasticsearch_client = mock_es_client 178 | with mock.patch('elastalert.elastalert.load_conf') as load_conf: 179 | with mock.patch('elastalert.elastalert.BackgroundScheduler'): 180 | load_conf.return_value = conf 181 | conf['rules_loader'].load.return_value = rules 182 | conf['rules_loader'].get_hashes.return_value = {} 183 | ea = elastalert.elastalert.ElastAlerter(['--pin_rules']) 184 | ea.rules[0]['type'] = mock_ruletype() 185 | ea.rules[0]['alert'] = [mock_alert()] 186 | ea.writeback_es = mock_es_client() 187 | ea.writeback_es.search.return_value = {'hits': {'hits': []}, 'total': 0} 188 | ea.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}} 189 | ea.writeback_es.index.return_value = {'_id': 'ABCD', 'created': True} 190 | ea.current_es = mock_es_client('', '') 191 | ea.thread_data.current_es = ea.current_es 192 | ea.thread_data.num_hits = 0 193 | ea.thread_data.num_dupes = 0 194 | return ea 195 | 196 | 197 | @pytest.fixture 198 | def ea_sixsix(): 199 | rules = [{'es_host': '', 200 | 'es_port': 14900, 201 | 'name': 'anytest', 202 | 'index': 'idx', 203 | 'filter': [], 204 | 'include': ['@timestamp'], 205 | 'run_every': datetime.timedelta(seconds=1), 206 | 'aggregation': datetime.timedelta(0), 207 | 'realert': datetime.timedelta(0), 208 | 'processed_hits': {}, 209 | 'timestamp_field': '@timestamp', 210 | 'match_enhancements': [], 211 | 'rule_file': 'blah.yaml', 212 | 'max_query_size': 10000, 213 | 'ts_to_dt': ts_to_dt, 214 | 'dt_to_ts': dt_to_ts, 215 | '_source_enabled': True}] 216 | conf = {'rules_folder': 'rules', 217 | 'run_every': datetime.timedelta(minutes=10), 218 | 'buffer_time': datetime.timedelta(minutes=5), 219 | 'alert_time_limit': datetime.timedelta(hours=24), 220 | 'es_host': 'es', 221 | 'es_port': 14900, 222 | 'writeback_index': writeback_index, 223 | 'writeback_alias': 'wb_a', 224 | 'rules': rules, 225 | 'max_query_size': 10000, 226 | 'old_query_limit': datetime.timedelta(weeks=1), 227 | 'disable_rules_on_error': False, 228 | 'scroll_keepalive': '30s'} 229 | conf['rules_loader'] = mock_rule_loader(conf) 230 | elastalert.elastalert.elasticsearch_client = mock_es_sixsix_client 231 | elastalert.util.elasticsearch_client = mock_es_sixsix_client 232 | with 
mock.patch('elastalert.elastalert.load_conf') as load_conf: 233 | with mock.patch('elastalert.elastalert.BackgroundScheduler'): 234 | load_conf.return_value = conf 235 | conf['rules_loader'].load.return_value = rules 236 | conf['rules_loader'].get_hashes.return_value = {} 237 | ea_sixsix = elastalert.elastalert.ElastAlerter(['--pin_rules']) 238 | ea_sixsix.rules[0]['type'] = mock_ruletype() 239 | ea_sixsix.rules[0]['alert'] = [mock_alert()] 240 | ea_sixsix.writeback_es = mock_es_sixsix_client() 241 | ea_sixsix.writeback_es.search.return_value = {'hits': {'hits': []}} 242 | ea_sixsix.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}} 243 | ea_sixsix.writeback_es.index.return_value = {'_id': 'ABCD'} 244 | ea_sixsix.current_es = mock_es_sixsix_client('', -1) 245 | return ea_sixsix 246 | 247 | 248 | @pytest.fixture(scope='function') 249 | def environ(): 250 | """py.test fixture to get a fresh mutable environment.""" 251 | old_env = os.environ 252 | new_env = dict(list(old_env.items())) 253 | os.environ = new_env 254 | yield os.environ 255 | os.environ = old_env 256 | -------------------------------------------------------------------------------- /tests/create_index_test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import json 3 | 4 | import pytest 5 | 6 | import elastalert.create_index 7 | 8 | es_mappings = [ 9 | 'elastalert', 10 | 'elastalert_error', 11 | 'elastalert_status', 12 | 'past_elastalert', 13 | 'silence' 14 | ] 15 | 16 | 17 | @pytest.mark.parametrize('es_mapping', es_mappings) 18 | def test_read_default_index_mapping(es_mapping): 19 | mapping = elastalert.create_index.read_es_index_mapping(es_mapping) 20 | assert es_mapping not in mapping 21 | print((json.dumps(mapping, indent=2))) 22 | 23 | 24 | @pytest.mark.parametrize('es_mapping', es_mappings) 25 | def test_read_es_5_index_mapping(es_mapping): 26 | mapping = elastalert.create_index.read_es_index_mapping(es_mapping, 5) 27 | assert es_mapping in mapping 28 | print((json.dumps(mapping, indent=2))) 29 | 30 | 31 | @pytest.mark.parametrize('es_mapping', es_mappings) 32 | def test_read_es_6_index_mapping(es_mapping): 33 | mapping = elastalert.create_index.read_es_index_mapping(es_mapping, 6) 34 | assert es_mapping not in mapping 35 | print((json.dumps(mapping, indent=2))) 36 | 37 | 38 | def test_read_default_index_mappings(): 39 | mappings = elastalert.create_index.read_es_index_mappings() 40 | assert len(mappings) == len(es_mappings) 41 | print((json.dumps(mappings, indent=2))) 42 | 43 | 44 | def test_read_es_5_index_mappings(): 45 | mappings = elastalert.create_index.read_es_index_mappings(5) 46 | assert len(mappings) == len(es_mappings) 47 | print((json.dumps(mappings, indent=2))) 48 | 49 | 50 | def test_read_es_6_index_mappings(): 51 | mappings = elastalert.create_index.read_es_index_mappings(6) 52 | assert len(mappings) == len(es_mappings) 53 | print((json.dumps(mappings, indent=2))) 54 | -------------------------------------------------------------------------------- /tests/elasticsearch_test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | import json 4 | import time 5 | 6 | import dateutil 7 | import pytest 8 | 9 | import elastalert.create_index 10 | import elastalert.elastalert 11 | from elastalert import ElasticSearchClient 12 | from elastalert.util import build_es_conn_config 13 | from tests.conftest import ea # noqa: F401 14 | 15 | test_index = 
'test_index' 16 | 17 | es_host = '127.0.0.1' 18 | es_port = 9200 19 | es_timeout = 10 20 | 21 | 22 | @pytest.fixture 23 | def es_client(): 24 | es_conn_config = build_es_conn_config({'es_host': es_host, 'es_port': es_port, 'es_conn_timeout': es_timeout}) 25 | return ElasticSearchClient(es_conn_config) 26 | 27 | 28 | @pytest.mark.elasticsearch 29 | class TestElasticsearch(object): 30 | # TODO perform teardown removing data inserted into Elasticsearch 31 | # Warning: this test class does not erase its test data from the Elasticsearch server. 32 | # This is not a problem as long as the data is manually removed or the test environment 33 | # is torn down after the test run (e.g. when running tests in a disposable environment such as Travis) 34 | def test_create_indices(self, es_client): 35 | elastalert.create_index.create_index_mappings(es_client=es_client, ea_index=test_index) 36 | indices_mappings = es_client.indices.get_mapping(test_index + '*') 37 | print(('-' * 50)) 38 | print((json.dumps(indices_mappings, indent=2))) 39 | print(('-' * 50)) 40 | if es_client.is_atleastsix(): 41 | assert test_index in indices_mappings 42 | assert test_index + '_error' in indices_mappings 43 | assert test_index + '_status' in indices_mappings 44 | assert test_index + '_silence' in indices_mappings 45 | assert test_index + '_past' in indices_mappings 46 | else: 47 | assert 'elastalert' in indices_mappings[test_index]['mappings'] 48 | assert 'elastalert_error' in indices_mappings[test_index]['mappings'] 49 | assert 'elastalert_status' in indices_mappings[test_index]['mappings'] 50 | assert 'silence' in indices_mappings[test_index]['mappings'] 51 | assert 'past_elastalert' in indices_mappings[test_index]['mappings'] 52 | 53 | @pytest.mark.usefixtures("ea") 54 | def test_aggregated_alert(self, ea, es_client): # noqa: F811 55 | match_timestamp = datetime.datetime.now(tz=dateutil.tz.tzutc()).replace(microsecond=0) + datetime.timedelta( 56 | days=1) 57 | ea.rules[0]['aggregate_by_match_time'] = True 58 | match = {'@timestamp': match_timestamp, 59 | 'num_hits': 0, 60 | 'num_matches': 3 61 | } 62 | ea.writeback_es = es_client 63 | res = ea.add_aggregated_alert(match, ea.rules[0]) 64 | if ea.writeback_es.is_atleastsix(): 65 | assert res['result'] == 'created' 66 | else: 67 | assert res['created'] is True 68 | # Make sure added data is available for querying 69 | time.sleep(2) 70 | # Now let's find the pending aggregated alert 71 | assert ea.find_pending_aggregate_alert(ea.rules[0]) 72 | 73 | @pytest.mark.usefixtures("ea") 74 | def test_silenced(self, ea, es_client): # noqa: F811 75 | until_timestamp = datetime.datetime.now(tz=dateutil.tz.tzutc()).replace(microsecond=0) + datetime.timedelta( 76 | days=1) 77 | ea.writeback_es = es_client 78 | res = ea.set_realert(ea.rules[0]['name'], until_timestamp, 0) 79 | if ea.writeback_es.is_atleastsix(): 80 | assert res['result'] == 'created' 81 | else: 82 | assert res['created'] is True 83 | # Make sure added data is available for querying 84 | time.sleep(2) 85 | # Force lookup in elasticsearch 86 | ea.silence_cache = {} 87 | # Now let's check if our rule is reported as silenced 88 | assert ea.is_silenced(ea.rules[0]['name']) 89 | 90 | @pytest.mark.usefixtures("ea") 91 | def test_get_hits(self, ea, es_client): # noqa: F811 92 | start = datetime.datetime.now(tz=dateutil.tz.tzutc()).replace(microsecond=0) 93 | end = start + datetime.timedelta(days=1) 94 | ea.current_es = es_client 95 | if ea.current_es.is_atleastfive(): 96 | ea.rules[0]['five'] = True 97 | else: 98 | ea.rules[0]['five'] = False 99 | 
ea.thread_data.current_es = ea.current_es 100 | hits = ea.get_hits(ea.rules[0], start, end, test_index) 101 | 102 | assert isinstance(hits, list) 103 | -------------------------------------------------------------------------------- /tests/kibana_test.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import json 3 | 4 | from elastalert.kibana import add_filter 5 | from elastalert.kibana import dashboard_temp 6 | from elastalert.kibana import filters_from_dashboard 7 | from elastalert.kibana import kibana4_dashboard_link 8 | 9 | 10 | # Dashboard schema with only filters section 11 | test_dashboard = '''{ 12 | "title": "AD Lock Outs", 13 | "services": { 14 | "filter": { 15 | "list": { 16 | "0": { 17 | "type": "time", 18 | "field": "@timestamp", 19 | "from": "now-7d", 20 | "to": "now", 21 | "mandate": "must", 22 | "active": true, 23 | "alias": "", 24 | "id": 0 25 | }, 26 | "1": { 27 | "type": "field", 28 | "field": "_log_type", 29 | "query": "\\"active_directory\\"", 30 | "mandate": "must", 31 | "active": true, 32 | "alias": "", 33 | "id": 1 34 | }, 35 | "2": { 36 | "type": "querystring", 37 | "query": "ad.security_auditing_code:4740", 38 | "mandate": "must", 39 | "active": true, 40 | "alias": "", 41 | "id": 2 42 | } 43 | }, 44 | "ids": [ 45 | 0, 46 | 1, 47 | 2 48 | ] 49 | } 50 | } 51 | }''' 52 | test_dashboard = json.loads(test_dashboard) 53 | 54 | 55 | def test_filters_from_dashboard(): 56 | filters = filters_from_dashboard(test_dashboard) 57 | assert {'term': {'_log_type': '"active_directory"'}} in filters 58 | assert {'query': {'query_string': {'query': 'ad.security_auditing_code:4740'}}} in filters 59 | 60 | 61 | def test_add_filter(): 62 | basic_filter = {"term": {"this": "that"}} 63 | db = copy.deepcopy(dashboard_temp) 64 | add_filter(db, basic_filter) 65 | assert db['services']['filter']['list']['1'] == { 66 | 'field': 'this', 67 | 'alias': '', 68 | 'mandate': 'must', 69 | 'active': True, 70 | 'query': '"that"', 71 | 'type': 'field', 72 | 'id': 1 73 | } 74 | 75 | list_filter = {"term": {"this": ["that", "those"]}} 76 | db = copy.deepcopy(dashboard_temp) 77 | add_filter(db, list_filter) 78 | assert db['services']['filter']['list']['1'] == { 79 | 'field': 'this', 80 | 'alias': '', 81 | 'mandate': 'must', 82 | 'active': True, 83 | 'query': '("that" AND "those")', 84 | 'type': 'field', 85 | 'id': 1 86 | } 87 | 88 | 89 | def test_url_encoded(): 90 | url = kibana4_dashboard_link('example.com/#/Dashboard', '2015-01-01T00:00:00Z', '2017-01-01T00:00:00Z') 91 | assert not any(special_char in url.split('?_g=', 1)[-1] for special_char in "',\":;?&=()")  # check the URL-encoded settings, not the ?_g= delimiter 92 | 93 | 94 | def test_url_env_substitution(environ): 95 | environ.update({ 96 | 'KIBANA_HOST': 'kibana', 97 | 'KIBANA_PORT': '5601', 98 | }) 99 | url = kibana4_dashboard_link( 100 | 'http://$KIBANA_HOST:$KIBANA_PORT/#/Dashboard', 101 | '2015-01-01T00:00:00Z', 102 | '2017-01-01T00:00:00Z', 103 | ) 104 | assert url.startswith('http://kibana:5601/#/Dashboard') 105 | -------------------------------------------------------------------------------- /tests/util_test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from datetime import datetime 3 | from datetime import timedelta 4 | 5 | import mock 6 | import pytest 7 | from dateutil.parser import parse as dt 8 | 9 | from elastalert.util import add_raw_postfix 10 | from elastalert.util import format_index 11 | from elastalert.util import lookup_es_key 12 | from elastalert.util import parse_deadline 
13 | from elastalert.util import parse_duration 14 | from elastalert.util import replace_dots_in_field_names 15 | from elastalert.util import resolve_string 16 | from elastalert.util import set_es_key 17 | from elastalert.util import should_scrolling_continue 18 | 19 | 20 | @pytest.mark.parametrize('spec, expected_delta', [ 21 | ('hours=2', timedelta(hours=2)), 22 | ('minutes=30', timedelta(minutes=30)), 23 | ('seconds=45', timedelta(seconds=45)), 24 | ]) 25 | def test_parse_duration(spec, expected_delta): 26 | """``unit=num`` specs can be translated into ``timedelta`` instances.""" 27 | assert parse_duration(spec) == expected_delta 28 | 29 | 30 | @pytest.mark.parametrize('spec, expected_deadline', [ 31 | ('hours=2', dt('2017-07-07T12:00:00.000Z')), 32 | ('minutes=30', dt('2017-07-07T10:30:00.000Z')), 33 | ('seconds=45', dt('2017-07-07T10:00:45.000Z')), 34 | ]) 35 | def test_parse_deadline(spec, expected_deadline): 36 | """``unit=num`` specs can be translated into ``datetime`` instances.""" 37 | 38 | # Note: Can't mock ``utcnow`` directly because ``datetime`` is a built-in. 39 | class MockDatetime(datetime): 40 | @staticmethod 41 | def utcnow(): 42 | return dt('2017-07-07T10:00:00.000Z') 43 | 44 | with mock.patch('datetime.datetime', MockDatetime): 45 | assert parse_deadline(spec) == expected_deadline 46 | 47 | 48 | def test_setting_keys(ea): 49 | expected = 12467267 50 | record = { 51 | 'Message': '12345', 52 | 'Fields': { 53 | 'ts': 'fail', 54 | 'severity': 'large', 55 | 'user': 'jimmay' 56 | } 57 | } 58 | 59 | # Set the value 60 | assert set_es_key(record, 'Fields.ts', expected) 61 | 62 | # Get the value again 63 | assert lookup_es_key(record, 'Fields.ts') == expected 64 | 65 | 66 | def test_looking_up_missing_keys(ea): 67 | record = { 68 | 'Message': '12345', 69 | 'Fields': { 70 | 'severity': 'large', 71 | 'user': 'jimmay', 72 | 'null': None 73 | } 74 | } 75 | 76 | assert lookup_es_key(record, 'Fields.ts') is None 77 | 78 | assert lookup_es_key(record, 'Fields.null.foo') is None 79 | 80 | 81 | def test_looking_up_nested_keys(ea): 82 | expected = 12467267 83 | record = { 84 | 'Message': '12345', 85 | 'Fields': { 86 | 'ts': expected, 87 | 'severity': 'large', 88 | 'user': 'jimmay' 89 | } 90 | } 91 | 92 | assert lookup_es_key(record, 'Fields.ts') == expected 93 | 94 | 95 | def test_looking_up_nested_composite_keys(ea): 96 | expected = 12467267 97 | record = { 98 | 'Message': '12345', 99 | 'Fields': { 100 | 'ts.value': expected, 101 | 'severity': 'large', 102 | 'user': 'jimmay' 103 | } 104 | } 105 | 106 | assert lookup_es_key(record, 'Fields.ts.value') == expected 107 | 108 | 109 | def test_looking_up_arrays(ea): 110 | record = { 111 | 'flags': [1, 2, 3], 112 | 'objects': [ 113 | {'foo': 'bar'}, 114 | {'foo': [{'bar': 'baz'}]}, 115 | {'foo': {'bar': 'baz'}} 116 | ] 117 | } 118 | assert lookup_es_key(record, 'flags[0]') == 1 119 | assert lookup_es_key(record, 'flags[1]') == 2 120 | assert lookup_es_key(record, 'objects[0]foo') == 'bar' 121 | assert lookup_es_key(record, 'objects[1]foo[0]bar') == 'baz' 122 | assert lookup_es_key(record, 'objects[2]foo.bar') == 'baz' 123 | assert lookup_es_key(record, 'objects[1]foo[1]bar') is None 124 | assert lookup_es_key(record, 'objects[1]foo[0]baz') is None 125 | 126 | 127 | def test_add_raw_postfix(ea): 128 | expected = 'foo.raw' 129 | assert add_raw_postfix('foo', False) == expected 130 | assert add_raw_postfix('foo.raw', False) == expected 131 | expected = 'foo.keyword' 132 | assert add_raw_postfix('foo', True) == expected 133 | assert 
add_raw_postfix('foo.keyword', True) == expected 134 | 135 | 136 | def test_replace_dots_in_field_names(ea): 137 | actual = { 138 | 'a': { 139 | 'b.c': 'd', 140 | 'e': { 141 | 'f': { 142 | 'g.h': 0 143 | } 144 | } 145 | }, 146 | 'i.j.k': 1, 147 | 'l': { 148 | 'm': 2 149 | } 150 | } 151 | expected = { 152 | 'a': { 153 | 'b_c': 'd', 154 | 'e': { 155 | 'f': { 156 | 'g_h': 0 157 | } 158 | } 159 | }, 160 | 'i_j_k': 1, 161 | 'l': { 162 | 'm': 2 163 | } 164 | } 165 | assert replace_dots_in_field_names(actual) == expected 166 | assert replace_dots_in_field_names({'a': 0, 1: 2}) == {'a': 0, 1: 2} 167 | 168 | 169 | def test_resolve_string(ea): 170 | match = { 171 | 'name': 'mySystem', 172 | 'temperature': 45, 173 | 'humidity': 80.56, 174 | 'sensors': ['outsideSensor', 'insideSensor'], 175 | 'foo': {'bar': 'baz'} 176 | } 177 | 178 | expected_outputs = [ 179 | "mySystem is online ", 180 | "Sensors ['outsideSensor', 'insideSensor'] in the have temp 45 and 80.56 humidity", 181 | "Actuator in the has temp ", 182 | 'Something baz'] 183 | old_style_strings = [ 184 | "%(name)s is online %(noKey)s", 185 | "Sensors %(sensors)s in the %(noPlace)s have temp %(temperature)s and %(humidity)s humidity", 186 | "Actuator %(noKey)s in the %(noPlace)s has temp %(noKey)s", 187 | 'Something %(foo.bar)s'] 188 | 189 | assert resolve_string(old_style_strings[0], match) == expected_outputs[0] 190 | assert resolve_string(old_style_strings[1], match) == expected_outputs[1] 191 | assert resolve_string(old_style_strings[2], match) == expected_outputs[2] 192 | assert resolve_string(old_style_strings[3], match) == expected_outputs[3] 193 | 194 | new_style_strings = [ 195 | "{name} is online {noKey}", 196 | "Sensors {sensors} in the {noPlace} have temp {temperature} and {humidity} humidity", 197 | "Actuator {noKey} in the {noPlace} has temp {noKey}", 198 | "Something {foo[bar]}"] 199 | 200 | assert resolve_string(new_style_strings[0], match) == expected_outputs[0] 201 | assert resolve_string(new_style_strings[1], match) == expected_outputs[1] 202 | assert resolve_string(new_style_strings[2], match) == expected_outputs[2] 203 | assert resolve_string(new_style_strings[3], match) == expected_outputs[3] 204 | 205 | 206 | def test_format_index(): 207 | pattern = 'logstash-%Y.%m.%d' 208 | pattern2 = 'logstash-%Y.%W' 209 | date = dt('2018-06-25T12:00:00Z') 210 | date2 = dt('2018-06-26T12:00:00Z') 211 | assert sorted(format_index(pattern, date, date).split(',')) == ['logstash-2018.06.25'] 212 | assert sorted(format_index(pattern, date, date2).split(',')) == ['logstash-2018.06.25', 'logstash-2018.06.26'] 213 | assert sorted(format_index(pattern, date, date2, True).split(',')) == ['logstash-2018.06.24', 214 | 'logstash-2018.06.25', 215 | 'logstash-2018.06.26'] 216 | assert sorted(format_index(pattern2, date, date2, True).split(',')) == ['logstash-2018.25', 'logstash-2018.26'] 217 | 218 | 219 | def test_should_scrolling_continue(): 220 | rule_no_max_scrolling = {'max_scrolling_count': 0, 'scrolling_cycle': 1} 221 | rule_reached_max_scrolling = {'max_scrolling_count': 2, 'scrolling_cycle': 2} 222 | rule_before_first_run = {'max_scrolling_count': 0, 'scrolling_cycle': 0} 223 | rule_before_max_scrolling = {'max_scrolling_count': 2, 'scrolling_cycle': 1} 224 | rule_over_max_scrolling = {'max_scrolling_count': 2, 'scrolling_cycle': 3} 225 | 226 | assert should_scrolling_continue(rule_no_max_scrolling) is True 227 | assert should_scrolling_continue(rule_reached_max_scrolling) is False 228 | assert should_scrolling_continue(rule_before_first_run) 
is True 229 | assert should_scrolling_continue(rule_before_max_scrolling) is True 230 | assert should_scrolling_continue(rule_over_max_scrolling) is False 231 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | project = elastalert 3 | envlist = py36,docs 4 | 5 | [testenv] 6 | deps = -rrequirements-dev.txt 7 | commands = 8 | coverage run --source=elastalert/,tests/ -m pytest --strict {posargs} 9 | coverage report -m 10 | flake8 . 11 | 12 | [testenv:lint] 13 | deps = {[testenv]deps} 14 | pylint 15 | commands = 16 | pylint --rcfile=.pylintrc elastalert 17 | pylint --rcfile=.pylintrc tests 18 | 19 | [testenv:devenv] 20 | envdir = virtualenv_run 21 | commands = 22 | 23 | [pytest] 24 | norecursedirs = .* virtualenv_run docs build venv env 25 | 26 | [testenv:docs] 27 | deps = {[testenv]deps} 28 | sphinx==1.6.6 29 | changedir = docs 30 | commands = sphinx-build -b html -d build/doctrees -W source build/html 31 | --------------------------------------------------------------------------------
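Taken together, the py36 tox environment above runs the unit suite under coverage and then flake8, while the elasticsearch-marked integration tests in tests/elasticsearch_test.py expect a live cluster on 127.0.0.1:9200 plus the --runelasticsearch flag defined in tests/conftest.py. The sketch below shows a rough standalone equivalent of those invocations; the file name run_tests.py is hypothetical and not part of the repository:

# -*- coding: utf-8 -*-
# run_tests.py (hypothetical helper, not part of the repository)
import sys

import pytest

if __name__ == '__main__':
    # Unit tests only; tests marked 'elasticsearch' are skipped:
    #     python run_tests.py
    # Integration tests against a live cluster; unmarked tests are skipped:
    #     python run_tests.py --runelasticsearch
    sys.exit(pytest.main(['tests/'] + sys.argv[1:]))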