├── .gitignore
├── .travis.yml
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── elasticmock
│   ├── __init__.py
│   ├── behaviour
│   │   ├── __init__.py
│   │   └── server_failure.py
│   ├── fake_cluster.py
│   ├── fake_elasticsearch.py
│   ├── fake_indices.py
│   └── utilities
│       ├── __init__.py
│       └── decorator.py
├── requirements.txt
├── requirements_test.txt
├── setup.cfg
├── setup.py
├── tests
│   ├── __init__.py
│   ├── fake_cluster
│   │   ├── __init__.py
│   │   └── test_health.py
│   ├── fake_elasticsearch
│   │   ├── __init__.py
│   │   ├── behaviour
│   │   │   ├── __init__.py
│   │   │   └── test_server_failure.py
│   │   ├── test_bulk.py
│   │   ├── test_count.py
│   │   ├── test_delete.py
│   │   ├── test_exists.py
│   │   ├── test_get.py
│   │   ├── test_index.py
│   │   ├── test_info.py
│   │   ├── test_instance.py
│   │   ├── test_ping.py
│   │   ├── test_scroll.py
│   │   ├── test_search.py
│   │   └── test_suggest.py
│   ├── fake_indices
│   │   ├── __init__.py
│   │   ├── test_create.py
│   │   ├── test_delete.py
│   │   ├── test_exists.py
│   │   └── test_refresh.py
│   └── tox_banner.py
└── tox.ini
/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.gitignore.io/api/intellij,linux,osx,pycharm,python 3 | 4 | # Custom 5 | bin/ 6 | share/ 7 | pyvenv.cfg 8 | 9 | ### Visual Studio ### 10 | .vscode/ 11 | 12 | ### Intellij ### 13 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 14 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 15 | 16 | # User-specific stuff: 17 | .idea/ 18 | .idea/workspace.xml 19 | .idea/tasks.xml 20 | .idea/dictionaries 21 | .idea/vcs.xml 22 | .idea/jsLibraryMappings.xml 23 | 24 | # Sensitive or high-churn files: 25 | .idea/dataSources.ids 26 | .idea/dataSources.xml 27 | .idea/dataSources.local.xml 28 | .idea/sqlDataSources.xml 29 | .idea/dynamic.xml 30 | .idea/uiDesigner.xml 31 | 32 | # Gradle: 33 | .idea/gradle.xml 34 | .idea/libraries 35 | 36 | # Mongo Explorer plugin: 37 | .idea/mongoSettings.xml 38 | 39 | ## File-based project format: 40 | *.iws 41 | 42 | ## Plugin-specific files: 43 | 44 | # IntelliJ 45 | /out/ 46 | 47 | # mpeltonen/sbt-idea plugin 48 | .idea_modules/ 49 | 50 | # JIRA plugin 51 | atlassian-ide-plugin.xml 52 | 53 | # Crashlytics plugin (for Android Studio and IntelliJ) 54 | com_crashlytics_export_strings.xml 55 | crashlytics.properties 56 | crashlytics-build.properties 57 | fabric.properties 58 | 59 | ### Intellij Patch ### 60 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 61 | 62 | # *.iml 63 | # modules.xml 64 | # .idea/misc.xml 65 | # *.ipr 66 | 67 | 68 | ### Linux ### 69 | *~ 70 | 71 | # temporary files which can be created if a process still has a handle open of a deleted file 72 | .fuse_hidden* 73 | 74 | # KDE directory preferences 75 | .directory 76 | 77 | # Linux trash folder which might appear on any partition or disk 78 | .Trash-* 79 | 80 | 81 | ### OSX ### 82 | *.DS_Store 83 | .AppleDouble 84 | .LSOverride 85 | 86 | # Icon must end with two \r 87 | Icon 88 | 89 | 90 | # Thumbnails 91 | ._* 92 | 93 | # Files that might appear in the root of a volume 94 | .DocumentRevisions-V100 95 | .fseventsd 96 | .Spotlight-V100 97 | .TemporaryItems 98 | .Trashes 99 | .VolumeIcon.icns 100 | .com.apple.timemachine.donotpresent 101 | 102 | # Directories potentially created on remote AFP share 103 | .AppleDB 104 | .AppleDesktop 105 | Network Trash Folder 106 | Temporary Items 107 | .apdisk 108 | 109 | 110 | ### PyCharm ### 111 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion,
Android Studio and Webstorm 112 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 113 | 114 | # User-specific stuff: 115 | .idea/workspace.xml 116 | .idea/tasks.xml 117 | .idea/dictionaries 118 | .idea/vcs.xml 119 | .idea/jsLibraryMappings.xml 120 | 121 | # Sensitive or high-churn files: 122 | .idea/dataSources.ids 123 | .idea/dataSources.xml 124 | .idea/dataSources.local.xml 125 | .idea/sqlDataSources.xml 126 | .idea/dynamic.xml 127 | .idea/uiDesigner.xml 128 | 129 | # Gradle: 130 | .idea/gradle.xml 131 | .idea/libraries 132 | 133 | # Mongo Explorer plugin: 134 | .idea/mongoSettings.xml 135 | 136 | ## File-based project format: 137 | *.iws 138 | 139 | ## Plugin-specific files: 140 | 141 | # IntelliJ 142 | /out/ 143 | 144 | # mpeltonen/sbt-idea plugin 145 | .idea_modules/ 146 | 147 | # JIRA plugin 148 | atlassian-ide-plugin.xml 149 | 150 | # Crashlytics plugin (for Android Studio and IntelliJ) 151 | com_crashlytics_export_strings.xml 152 | crashlytics.properties 153 | crashlytics-build.properties 154 | fabric.properties 155 | 156 | ### PyCharm Patch ### 157 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 158 | 159 | # *.iml 160 | # modules.xml 161 | # .idea/misc.xml 162 | # *.ipr 163 | 164 | 165 | ### Python ### 166 | # Byte-compiled / optimized / DLL files 167 | __pycache__/ 168 | *.py[cod] 169 | *$py.class 170 | 171 | # C extensions 172 | *.so 173 | 174 | # Distribution / packaging 175 | .Python 176 | env/ 177 | build/ 178 | develop-eggs/ 179 | dist/ 180 | downloads/ 181 | eggs/ 182 | .eggs/ 183 | lib/ 184 | lib64/ 185 | parts/ 186 | sdist/ 187 | var/ 188 | *.egg-info/ 189 | .installed.cfg 190 | *.egg 191 | 192 | # PyInstaller 193 | # Usually these files are written by a python script from a template 194 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
195 | *.manifest 196 | *.spec 197 | 198 | # Installer logs 199 | pip-log.txt 200 | pip-delete-this-directory.txt 201 | 202 | # Unit test / coverage reports 203 | htmlcov/ 204 | .tox/ 205 | .coverage 206 | .coverage.* 207 | .cache 208 | nosetests.xml 209 | coverage.xml 210 | *,cover 211 | .hypothesis/ 212 | 213 | # Translations 214 | *.mo 215 | *.pot 216 | 217 | # Django stuff: 218 | *.log 219 | local_settings.py 220 | 221 | # Flask stuff: 222 | instance/ 223 | .webassets-cache 224 | 225 | # Scrapy stuff: 226 | .scrapy 227 | 228 | # Sphinx documentation 229 | docs/_build/ 230 | 231 | # PyBuilder 232 | target/ 233 | 234 | # IPython Notebook 235 | .ipynb_checkpoints 236 | 237 | # pyenv 238 | .python-version 239 | 240 | # celery beat schedule file 241 | celerybeat-schedule 242 | 243 | # dotenv 244 | .env 245 | 246 | # virtualenv 247 | venv/ 248 | ENV/ 249 | 250 | # Spyder project settings 251 | .spyderproject 252 | 253 | # Rope project settings 254 | .ropeproject 255 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | language: python 3 | python: 4 | - "3.6" 5 | - "3.7" 6 | - "3.8" 7 | - "3.9" 8 | install: 9 | - pip install tox-travis 10 | - pip install coveralls 11 | script: 12 | tox 13 | after_success: 14 | coveralls 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Marcos Cardoso 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ELASTICMOCK_VERSION='1.8.1' 2 | 3 | install: 4 | pip3 install -r requirements.txt 5 | 6 | test_install: install 7 | pip3 install -r requirements_test.txt 8 | 9 | test: test_install 10 | python3.9 setup.py test 11 | 12 | upload: create_dist 13 | pip3 install twine 14 | twine upload dist/* 15 | git push 16 | 17 | create_dist: create_dist_no_commit update_pip 18 | rm -rf dist 19 | python3.9 setup.py sdist 20 | 21 | create_dist_no_commit: update_pip 22 | rm -rf dist 23 | python3.9 setup.py sdist 24 | 25 | create_dist_commit: 26 | git commit --all -m "Bump version ${ELASTICMOCK_VERSION}" 27 | git tag ${ELASTICMOCK_VERSION} 28 | 29 | update_pip: 30 | pip3 install --upgrade pip 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ElasticMock 2 | 3 | Python Elasticsearch Mock for test purposes 4 | 5 | [![Build Status](https://travis-ci.org/vrcmarcos/elasticmock.svg?branch=master)](https://travis-ci.org/vrcmarcos/elasticmock) [![Coverage Status](https://coveralls.io/repos/github/vrcmarcos/elasticmock/badge.svg?branch=master)](https://coveralls.io/github/vrcmarcos/elasticmock?branch=master) [![PyPI version](https://badge.fury.io/py/ElasticMock.svg)](https://badge.fury.io/py/ElasticMock) [![GitHub license](https://img.shields.io/github/license/vrcmarcos/elasticmock)](https://github.com/vrcmarcos/elasticmock/blob/master/LICENSE) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ElasticMock) ![ElasticSearch Version](https://img.shields.io/badge/elasticsearch-1%20%7C%202%20%7C%205%20%7C%206%20%7C%207-blue) 6 | 7 | ![Libraries.io dependency status for latest release](https://img.shields.io/librariesio/release/pypi/elasticmock) [![Downloads](https://pepy.tech/badge/elasticmock/month)](https://pepy.tech/project/elasticmock/month) 8 | 9 | ## Installation 10 | 11 | ```shell 12 | pip install ElasticMock 13 | ``` 14 | 15 | ## Usage 16 | 17 | To use ElasticMock, decorate your test method with the **@elasticmock** decorator: 18 | 19 | ```python 20 | from unittest import TestCase 21 | 22 | from elasticmock import elasticmock 23 | 24 | 25 | class TestClass(TestCase): 26 | 27 | @elasticmock 28 | def test_should_return_something_from_elasticsearch(self): 29 | self.assertIsNotNone(some_function_that_uses_elasticsearch()) 30 | ``` 31 | 32 | ### Custom Behaviours 33 | 34 | You can also force specific behaviours on the mocked Elasticsearch instance by importing the `elasticmock.behaviour` module: 35 | 36 | ```python 37 | from unittest import TestCase 38 | 39 | from elasticmock import behaviour 40 | 41 | 42 | class TestClass(TestCase): 43 | 44 | ... 45 | 46 | def test_should_return_internal_server_error_when_simulate_server_error_is_true(self): 47 | behaviour.server_failure.enable() 48 | ...
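        # While the behaviour is enabled, every call on the mocked client
        # returns the failure payload listed under "Available Behaviours"
        # below; illustrative sketch, assuming `es` is a mocked client:
        #   self.assertEqual(500, es.search(index='some-index')['status_code'])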
49 | behaviour.server_failure.disable() 50 | ``` 51 | 52 | You can also disable all behaviours by calling `behaviour.disable_all()` (consider putting this in your `def tearDown(self)` method) 53 | 54 | #### Available Behaviours 55 | 56 | * `server_failure`: Makes all calls to Elasticsearch return the following error message: 57 | ```python 58 | { 59 | 'status_code': 500, 60 | 'error': 'Internal Server Error' 61 | } 62 | ``` 63 | 64 | ## Code example 65 | 66 | Let's say you have a production code snippet like this one: 67 | 68 | ```python 69 | import elasticsearch 70 | 71 | class FooService: 72 | 73 | def __init__(self): 74 | self.es = elasticsearch.Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}]) 75 | 76 | def create(self, index, body): 77 | es_object = self.es.index(index, body) 78 | return es_object.get('_id') 79 | 80 | def read(self, index, id): 81 | es_object = self.es.get(index, id) 82 | return es_object.get('_source') 83 | 84 | ``` 85 | 86 | Then you should be able to test this class by mocking Elasticsearch with the following test class: 87 | 88 | ```python 89 | from unittest import TestCase 90 | from elasticmock import elasticmock 91 | from foo.bar import FooService 92 | 93 | class FooServiceTest(TestCase): 94 | 95 | @elasticmock 96 | def test_should_create_and_read_object(self): 97 | # Variables used to test 98 | index = 'test-index' 99 | expected_document = { 100 | 'foo': 'bar' 101 | } 102 | 103 | # Instantiate service 104 | service = FooService() 105 | 106 | # Index document on Elasticsearch 107 | id = service.create(index, expected_document) 108 | self.assertIsNotNone(id) 109 | 110 | # Retrieve document from Elasticsearch 111 | document = service.read(index, id) 112 | self.assertEqual(expected_document, document) 113 | 114 | ``` 115 | 116 | ## Notes: 117 | 118 | - The mocked **search** method returns **all available documents** indexed on the index with the requested document type. 119 | - The mocked **suggest** method returns exactly the suggestion dictionary passed as the body, serialized in the shape of an Elasticsearch suggest response. **Attention:** if the term is an *int*, the suggested option is `term + 1`. If not, the suggestion is formatted as `'{0}_suggestion'.format(term)`.
120 | Example: 121 | - **Suggestion Body**: 122 | ```python 123 | suggestion_body = { 124 | 'suggestion-string': { 125 | 'text': 'test_text', 126 | 'term': { 127 | 'field': 'string' 128 | } 129 | }, 130 | 'suggestion-id': { 131 | 'text': 1234567, 132 | 'term': { 133 | 'field': 'id' 134 | } 135 | } 136 | } 137 | ``` 138 | - **Suggestion Response**: 139 | ```python 140 | { 141 | 'suggestion-string': [ 142 | { 143 | 'text': 'test_text', 144 | 'length': 1, 145 | 'options': [ 146 | { 147 | 'text': 'test_text_suggestion', 148 | 'freq': 1, 149 | 'score': 1.0 150 | } 151 | ], 152 | 'offset': 0 153 | } 154 | ], 155 | 'suggestion-id': [ 156 | { 157 | 'text': 1234567, 158 | 'length': 1, 159 | 'options': [ 160 | { 161 | 'text': 1234568, 162 | 'freq': 1, 163 | 'score': 1.0 164 | } 165 | ], 166 | 'offset': 0 167 | } 168 | ], 169 | } 170 | ``` 171 | 172 | ## Testing 173 | 174 | ```bash 175 | python setup.py test 176 | ``` 177 | 178 | ## Changelog 179 | 180 | #### 1.8.1: 181 | - [Add support for Python 3.9](https://github.com/vrcmarcos/elasticmock/pull/72) (Thanks [@singingwolfboy](https://github.com/singingwolfboy)) 182 | - [use unittest.mock instead of mock](https://github.com/vrcmarcos/elasticmock/pull/71) (Thanks [@singingwolfboy](https://github.com/singingwolfboy)) 183 | - [Add must_not for bool search query](https://github.com/vrcmarcos/elasticmock/pull/70) (Thanks [@t-bittarn](https://github.com/t-bittarn)) 184 | 185 | 186 | #### 1.8.0: 187 | - [Add multi_match](https://github.com/vrcmarcos/elasticmock/pull/63) (Thanks [@carlosgalvez-tiendeo](https://github.com/carlosgalvez-tiendeo)) 188 | - [Add mget](https://github.com/vrcmarcos/elasticmock/pull/64) (Thanks [@carlosgalvez-tiendeo](https://github.com/carlosgalvez-tiendeo)) 189 | - [Add create, update, and delete to bulk API](https://github.com/vrcmarcos/elasticmock/pull/65) (Thanks [@fenimore](https://github.com/fenimore)) 190 | - [Add Should to bool Query](https://github.com/vrcmarcos/elasticmock/pull/67) (Thanks [@fenimore](https://github.com/fenimore)) 191 | - [Update Search API return result](https://github.com/vrcmarcos/elasticmock/pull/68) (Thanks [@fenimore](https://github.com/fenimore)) 192 | 193 | #### 1.7.0: 194 | - [Add shards skipped to search and count](https://github.com/vrcmarcos/elasticmock/pull/56) (Thanks [@philtweir](https://github.com/philtweir)) 195 | - [Allow 'match_all' queries in FakeSearch](https://github.com/vrcmarcos/elasticmock/pull/54) (Thanks [@jankislinger](https://github.com/jankislinger)) 196 | - [Query using nested attributes](https://github.com/vrcmarcos/elasticmock/pull/55) (Thanks [@jankislinger](https://github.com/jankislinger)) 197 | - [New features: range, size, aggregations](https://github.com/vrcmarcos/elasticmock/pull/57) (Thanks [@jankislinger](https://github.com/jankislinger)) 198 | - [Adding "should" and "minimum_should_match" to QueryType](https://github.com/vrcmarcos/elasticmock/pull/62) (Thanks [@lunarie16](https://github.com/lunarie16)) 199 | 200 | #### 1.6.2: 201 | - [Add must to query type](https://github.com/vrcmarcos/elasticmock/pull/47) (Thanks [@cuent](https://github.com/cuent)) 202 | - [Add match all query type](https://github.com/vrcmarcos/elasticmock/pull/48) (Thanks [@cuent](https://github.com/cuent)) 203 | 204 | #### 1.6.1: 205 | - Fix Twine README.md 206 | 207 | #### 1.6.0: 208 | - [Implements several basic search types](https://github.com/vrcmarcos/elasticmock/pull/42) (Thanks [@KyKoPho](https://github.com/KyKoPho)) 209 | - [Allow ignoring of missing documents (404) for get and 
delete](https://github.com/vrcmarcos/elasticmock/pull/44) (Thanks [@joosterman](https://github.com/joosterman)) 210 | 211 | #### 1.5.1: 212 | - [Fix tests for es > 7](https://github.com/vrcmarcos/elasticmock/pull/38) (Thanks [@chesstrian](https://github.com/chesstrian)) 213 | 214 | #### 1.5.0: 215 | - [**FakeElasticSearch**: Mocked **indices** property](https://github.com/vrcmarcos/elasticmock/issues/22) 216 | - **FakeIndicesClient**: Mocked **create**, **exists**, **refresh** and **delete** methods 217 | - [**FakeElasticSearch**: Mocked **cluster** property](https://github.com/vrcmarcos/elasticmock/issues/8) 218 | - **FakeClusterClient**: Mocked **health** method 219 | 220 | #### 1.4.0 221 | 222 | - [Fix es.index regression issue](https://github.com/vrcmarcos/elasticmock/issues/34) 223 | - [Add 'Force Server Failure' feature as requested](https://github.com/vrcmarcos/elasticmock/issues/28) 224 | - Reformat code to be compliant with PEP8 225 | - Add support to Python 3.8 226 | 227 | #### 1.3.7 228 | 229 | - [Adding fix for updating existing doc using index](https://github.com/vrcmarcos/elasticmock/pull/32) (Thanks [@adityaghosh](https://github.com/adityaghosh)) 230 | - [Added bulk method](https://github.com/vrcmarcos/elasticmock/pull/30) (Thanks [@charl-van-niekerk](https://github.com/charl-van-niekerk)) 231 | - [Add default value to doc_type in index method as it is by default set to '\_doc'](https://github.com/vrcmarcos/elasticmock/pull/27) (Thanks [@mohantyashish109](https://github.com/mohantyashish109)) 232 | - [Add support for Python 3.7](https://github.com/vrcmarcos/elasticmock/pull/25) (Thanks [@asherf](https://github.com/asherf)) 233 | 234 | #### 1.3.6 235 | 236 | - [Fix installation issue](https://github.com/vrcmarcos/elasticmock/pull/20) (Thanks [@tdhopper](https://github.com/tdhopper)) 237 | 238 | #### 1.3.5 239 | 240 | - [Fix 1.3.4 release](https://github.com/vrcmarcos/elasticmock/pull/19) (Thanks [@infinite-Joy](https://github.com/infinite-Joy)) 241 | 242 | #### 1.3.4 243 | 244 | - [Added aggregations to response if requested](https://github.com/vrcmarcos/elasticmock/pull/15) (Thanks [@snakeye](https://github.com/snakeye)) 245 | - [Implementing new methods for scrolling](https://github.com/vrcmarcos/elasticmock/pull/17) (Thanks [@tcatrain](https://github.com/tcatrain)) 246 | 247 | #### 1.3.3 248 | 249 | - [Search: doc_type can be a list](https://github.com/vrcmarcos/elasticmock/pull/16) (Thanks [@garncarz](https://github.com/garncarz)) 250 | - [Exclude tests package](https://github.com/vrcmarcos/elasticmock/pull/13) (Thanks [@jmlw](https://github.com/jmlw)) 251 | - [Make the FakeElasticsearch __init__ signature match the one from Elasticsearch](https://github.com/vrcmarcos/elasticmock/pull/10) (Thanks [@xrmx](https://github.com/xrmx)) 252 | - [Improve search and count](https://github.com/vrcmarcos/elasticmock/pull/7) (Thanks [@frivoire](https://github.com/frivoire)) 253 | 254 | #### 1.3.2 255 | 256 | - **elasticmock**: Python 3 fixes (Thanks [@barseghyanartur](https://github.com/barseghyanartur)) 257 | - **test**: Add information on testing (Thanks [@barseghyanartur](https://github.com/barseghyanartur)) 258 | - **README.md**: Fixed typo (Thanks [@bowlofstew](https://github.com/bowlofstew)) 259 | 260 | #### 1.3.1 261 | 262 | - **elasticmock**: Allow the same arguments to the mock that elasticsearch.Elasticsearch allows (Thanks [@mattbreeden](https://github.com/mattbreeden)) 263 | 264 | #### 1.3.0: 265 | - **FakeElasticSearch**: Mocked **count** method (Thanks 
[@TheoResources](https://github.com/TheoResources)) 266 | 267 | #### 1.2.0: 268 | - **FakeElasticSearch**: Mocked **suggest** method 269 | 270 | #### 1.1.1: 271 | - **elasticmock**: Changing the cleanup order of older FakeElasticSearch instances 272 | - **FakeElasticSearch.index**: Changing the method signature to correctly override the Elasticsearch.index method 273 | 274 | #### 1.1.0: 275 | - **FakeElasticSearch**: Mocked **delete** method 276 | 277 | #### 1.0.1: 278 | - **setup.py**: Fixed GitHub link 279 | 280 | #### 1.0.0: 281 | - **elasticmock**: Created **@elasticmock** decorator 282 | - **FakeElasticSearch**: Mocked **exists**, **get**, **get_source**, **index**, **info**, **search** and **ping** methods 283 | -------------------------------------------------------------------------------- /elasticmock/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from functools import wraps 4 | 5 | from elasticsearch.client import _normalize_hosts 6 | from unittest.mock import patch 7 | 8 | from elasticmock.fake_elasticsearch import FakeElasticsearch 9 | 10 | ELASTIC_INSTANCES = {} 11 | 12 | 13 | def _get_elasticmock(hosts=None, *args, **kwargs): 14 | host = _normalize_hosts(hosts)[0] 15 | elastic_key = '{0}:{1}'.format( 16 | host.get('host', 'localhost'), host.get('port', 9200) 17 | ) 18 | 19 | if elastic_key in ELASTIC_INSTANCES: 20 | connection = ELASTIC_INSTANCES.get(elastic_key) 21 | else: 22 | connection = FakeElasticsearch() 23 | ELASTIC_INSTANCES[elastic_key] = connection 24 | return connection 25 | 26 | 27 | def elasticmock(f): 28 | @wraps(f) 29 | def decorated(*args, **kwargs): 30 | ELASTIC_INSTANCES.clear() 31 | with patch('elasticsearch.Elasticsearch', _get_elasticmock): 32 | result = f(*args, **kwargs) 33 | return result 34 | return decorated 35 | -------------------------------------------------------------------------------- /elasticmock/behaviour/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticmock.behaviour import server_failure 4 | 5 | 6 | def disable_all(): 7 | server_failure.disable() 8 | -------------------------------------------------------------------------------- /elasticmock/behaviour/server_failure.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from functools import wraps 4 | 5 | __ENABLED = False 6 | 7 | 8 | def enable(): 9 | global __ENABLED 10 | __ENABLED = True 11 | 12 | 13 | def disable(): 14 | global __ENABLED 15 | __ENABLED = False 16 | 17 | 18 | def server_failure(f): 19 | @wraps(f) 20 | def decorated(*args, **kwargs): 21 | if __ENABLED: 22 | response = { 23 | 'status_code': 500, 24 | 'error': 'Internal Server Error' 25 | } 26 | else: 27 | response = f(*args, **kwargs) 28 | return response 29 | return decorated 30 | -------------------------------------------------------------------------------- /elasticmock/fake_cluster.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticsearch.client.cluster import ClusterClient 4 | from elasticsearch.client.utils import query_params 5 | 6 | 7 | class FakeClusterClient(ClusterClient): 8 | 9 | @query_params('level', 'local', 'master_timeout', 'timeout', 10 | 'wait_for_active_shards', 'wait_for_nodes', 11 | 'wait_for_relocating_shards', 'wait_for_status') 12 | def health(self, index=None, params=None,
headers=None): 13 | return { 14 | 'cluster_name': 'testcluster', 15 | 'status': 'green', 16 | 'timed_out': False, 17 | 'number_of_nodes': 1, 18 | 'number_of_data_nodes': 1, 19 | 'active_primary_shards': 1, 20 | 'active_shards': 1, 21 | 'relocating_shards': 0, 22 | 'initializing_shards': 0, 23 | 'unassigned_shards': 1, 24 | 'delayed_unassigned_shards': 0, 25 | 'number_of_pending_tasks': 0, 26 | 'number_of_in_flight_fetch': 0, 27 | 'task_max_waiting_in_queue_millis': 0, 28 | 'active_shards_percent_as_number': 50.0 29 | } 30 | -------------------------------------------------------------------------------- /elasticmock/fake_elasticsearch.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | import json 4 | import sys 5 | from collections import defaultdict 6 | 7 | import dateutil.parser 8 | from elasticsearch import Elasticsearch 9 | from elasticsearch.client.utils import query_params 10 | from elasticsearch.client import _normalize_hosts 11 | from elasticsearch.transport import Transport 12 | from elasticsearch.exceptions import NotFoundError, RequestError 13 | 14 | from elasticmock.behaviour.server_failure import server_failure 15 | from elasticmock.fake_cluster import FakeClusterClient 16 | from elasticmock.fake_indices import FakeIndicesClient 17 | from elasticmock.utilities import (extract_ignore_as_iterable, get_random_id, 18 | get_random_scroll_id) 19 | from elasticmock.utilities.decorator import for_all_methods 20 | 21 | PY3 = sys.version_info[0] == 3 22 | if PY3: 23 | unicode = str 24 | 25 | 26 | class QueryType: 27 | BOOL = 'BOOL' 28 | FILTER = 'FILTER' 29 | MATCH = 'MATCH' 30 | MATCH_ALL = 'MATCH_ALL' 31 | TERM = 'TERM' 32 | TERMS = 'TERMS' 33 | MUST = 'MUST' 34 | RANGE = 'RANGE' 35 | SHOULD = 'SHOULD' 36 | MINIMUM_SHOULD_MATCH = 'MINIMUM_SHOULD_MATCH' 37 | MULTI_MATCH = 'MULTI_MATCH' 38 | MUST_NOT = 'MUST_NOT' 39 | 40 | @staticmethod 41 | def get_query_type(type_str): 42 | if type_str == 'bool': 43 | return QueryType.BOOL 44 | elif type_str == 'filter': 45 | return QueryType.FILTER 46 | elif type_str == 'match': 47 | return QueryType.MATCH 48 | elif type_str == 'match_all': 49 | return QueryType.MATCH_ALL 50 | elif type_str == 'term': 51 | return QueryType.TERM 52 | elif type_str == 'terms': 53 | return QueryType.TERMS 54 | elif type_str == 'must': 55 | return QueryType.MUST 56 | elif type_str == 'range': 57 | return QueryType.RANGE 58 | elif type_str == 'should': 59 | return QueryType.SHOULD 60 | elif type_str == 'minimum_should_match': 61 | return QueryType.MINIMUM_SHOULD_MATCH 62 | elif type_str == 'multi_match': 63 | return QueryType.MULTI_MATCH 64 | elif type_str == 'must_not': 65 | return QueryType.MUST_NOT 66 | else: 67 | raise NotImplementedError(f'type {type_str} is not implemented for QueryType') 68 | 69 | 70 | class MetricType: 71 | CARDINALITY = "CARDINALITY" 72 | 73 | @staticmethod 74 | def get_metric_type(type_str): 75 | if type_str == "cardinality": 76 | return MetricType.CARDINALITY 77 | else: 78 | raise NotImplementedError(f'type {type_str} is not implemented for MetricType') 79 | 80 | 81 | class FakeQueryCondition: 82 | type = None 83 | condition = None 84 | 85 | def __init__(self, type, condition): 86 | self.type = type 87 | self.condition = condition 88 | 89 | def evaluate(self, document): 90 | return self._evaluate_for_query_type(document) 91 | 92 | def _evaluate_for_query_type(self, document): 93 | if self.type == QueryType.MATCH: 94 | return 
self._evaluate_for_match_query_type(document) 95 | elif self.type == QueryType.MATCH_ALL: 96 | return True 97 | elif self.type == QueryType.TERM: 98 | return self._evaluate_for_term_query_type(document) 99 | elif self.type == QueryType.TERMS: 100 | return self._evaluate_for_terms_query_type(document) 101 | elif self.type == QueryType.RANGE: 102 | return self._evaluate_for_range_query_type(document) 103 | elif self.type == QueryType.BOOL: 104 | return self._evaluate_for_compound_query_type(document) 105 | elif self.type == QueryType.FILTER: 106 | return self._evaluate_for_compound_query_type(document) 107 | elif self.type == QueryType.MUST: 108 | return self._evaluate_for_compound_query_type(document) 109 | elif self.type == QueryType.SHOULD: 110 | return self._evaluate_for_should_query_type(document) 111 | elif self.type == QueryType.MULTI_MATCH: 112 | return self._evaluate_for_multi_match_query_type(document) 113 | elif self.type == QueryType.MUST_NOT: 114 | return self._evaluate_for_must_not_query_type(document) 115 | else: 116 | raise NotImplementedError('Fake query evaluation not implemented for query type: %s' % self.type) 117 | 118 | def _evaluate_for_match_query_type(self, document): 119 | return self._evaluate_for_field(document, True) 120 | 121 | def _evaluate_for_term_query_type(self, document): 122 | return self._evaluate_for_field(document, False) 123 | 124 | def _evaluate_for_terms_query_type(self, document): 125 | for field in self.condition: 126 | for term in self.condition[field]: 127 | if FakeQueryCondition(QueryType.TERM, {field: term}).evaluate(document): 128 | return True 129 | return False 130 | 131 | def _evaluate_for_field(self, document, ignore_case): 132 | doc_source = document['_source'] 133 | return_val = False 134 | for field, value in self.condition.items(): 135 | return_val = self._compare_value_for_field( 136 | doc_source, 137 | field, 138 | value, 139 | ignore_case 140 | ) 141 | if return_val: 142 | break 143 | return return_val 144 | 145 | def _evaluate_for_fields(self, document): 146 | doc_source = document['_source'] 147 | return_val = False 148 | value = self.condition.get('query') 149 | if not value: 150 | return return_val 151 | fields = self.condition.get('fields', []) 152 | for field in fields: 153 | return_val = self._compare_value_for_field( 154 | doc_source, 155 | field, 156 | value, 157 | True 158 | ) 159 | if return_val: 160 | break 161 | 162 | return return_val 163 | 164 | def _evaluate_for_range_query_type(self, document): 165 | for field, comparisons in self.condition.items(): 166 | doc_val = document['_source'] 167 | for k in field.split("."): 168 | if hasattr(doc_val, k): 169 | doc_val = getattr(doc_val, k) 170 | elif k in doc_val: 171 | doc_val = doc_val[k] 172 | else: 173 | return False 174 | 175 | if isinstance(doc_val, list): 176 | return False 177 | 178 | for sign, value in comparisons.items(): 179 | if isinstance(doc_val, datetime.datetime): 180 | value = dateutil.parser.isoparse(value) 181 | if sign == 'gte': 182 | if doc_val < value: 183 | return False 184 | elif sign == 'gt': 185 | if doc_val <= value: 186 | return False 187 | elif sign == 'lte': 188 | if doc_val > value: 189 | return False 190 | elif sign == 'lt': 191 | if doc_val >= value: 192 | return False 193 | else: 194 | raise ValueError(f"Invalid comparison type {sign}") 195 | return True 196 | 197 | def _evaluate_for_compound_query_type(self, document): 198 | return_val = False 199 | if isinstance(self.condition, dict): 200 | for query_type, sub_query in 
self.condition.items(): 201 | return_val = FakeQueryCondition( 202 | QueryType.get_query_type(query_type), 203 | sub_query 204 | ).evaluate(document) 205 | if not return_val: 206 | return False 207 | elif isinstance(self.condition, list): 208 | for sub_condition in self.condition: 209 | for sub_condition_key in sub_condition: 210 | return_val = FakeQueryCondition( 211 | QueryType.get_query_type(sub_condition_key), 212 | sub_condition[sub_condition_key] 213 | ).evaluate(document) 214 | if not return_val: 215 | return False 216 | 217 | return return_val 218 | 219 | def _evaluate_for_must_not_query_type(self, document): 220 | if isinstance(self.condition, dict): 221 | for query_type, sub_query in self.condition.items(): 222 | return_val = FakeQueryCondition( 223 | QueryType.get_query_type(query_type), 224 | sub_query 225 | ).evaluate(document) 226 | if return_val: 227 | return False 228 | elif isinstance(self.condition, list): 229 | for sub_condition in self.condition: 230 | for sub_condition_key in sub_condition: 231 | return_val = FakeQueryCondition( 232 | QueryType.get_query_type(sub_condition_key), 233 | sub_condition[sub_condition_key] 234 | ).evaluate(document) 235 | if return_val: 236 | return False 237 | return True 238 | 239 | def _evaluate_for_should_query_type(self, document): 240 | return_val = False 241 | for sub_condition in self.condition: 242 | for sub_condition_key in sub_condition: 243 | return_val = FakeQueryCondition( 244 | QueryType.get_query_type(sub_condition_key), 245 | sub_condition[sub_condition_key] 246 | ).evaluate(document) 247 | if return_val: 248 | return True 249 | return return_val 250 | 251 | def _evaluate_for_multi_match_query_type(self, document): 252 | return self._evaluate_for_fields(document) 253 | 254 | def _compare_value_for_field(self, doc_source, field, value, ignore_case): 255 | if ignore_case and isinstance(value, str): 256 | value = value.lower() 257 | 258 | doc_val = doc_source 259 | # Remove boosting 260 | field, *_ = field.split("*") 261 | for k in field.split("."): 262 | if hasattr(doc_val, k): 263 | doc_val = getattr(doc_val, k) 264 | break 265 | elif k in doc_val: 266 | doc_val = doc_val[k] 267 | break 268 | else: 269 | return False 270 | 271 | if not isinstance(doc_val, list): 272 | doc_val = [doc_val] 273 | 274 | for val in doc_val: 275 | if not isinstance(val, (int, float, complex)) or val is None: 276 | val = str(val) 277 | if ignore_case: 278 | val = val.lower() 279 | 280 | if value == val: 281 | return True 282 | if isinstance(val, str) and str(value) in val: 283 | return True 284 | 285 | return False 286 | 287 | 288 | @for_all_methods([server_failure]) 289 | class FakeElasticsearch(Elasticsearch): 290 | __documents_dict = None 291 | 292 | def __init__(self, hosts=None, transport_class=None, **kwargs): 293 | self.__documents_dict = {} 294 | self.__scrolls = {} 295 | self.transport = Transport(_normalize_hosts(hosts), **kwargs) 296 | 297 | @property 298 | def indices(self): 299 | return FakeIndicesClient(self) 300 | 301 | @property 302 | def cluster(self): 303 | return FakeClusterClient(self) 304 | 305 | @query_params() 306 | def ping(self, params=None, headers=None): 307 | return True 308 | 309 | @query_params() 310 | def info(self, params=None, headers=None): 311 | return { 312 | 'status': 200, 313 | 'cluster_name': 'elasticmock', 314 | 'version': 315 | { 316 | 'lucene_version': '4.10.4', 317 | 'build_hash': '00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4', 318 | 'number': '1.7.5', 319 | 'build_timestamp': '2016-02-02T09:55:30Z', 320 | 
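                # Hard-coded metadata: the mock always reports this fixed
                # ES 1.7.5 payload, whatever elasticsearch client is installed.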
'build_snapshot': False 321 | }, 322 | 'name': 'Nightwatch', 323 | 'tagline': 'You Know, for Search' 324 | } 325 | 326 | @query_params('consistency', 327 | 'op_type', 328 | 'parent', 329 | 'refresh', 330 | 'replication', 331 | 'routing', 332 | 'timeout', 333 | 'timestamp', 334 | 'ttl', 335 | 'version', 336 | 'version_type') 337 | def index(self, index, body, doc_type='_doc', id=None, params=None, headers=None): 338 | if index not in self.__documents_dict: 339 | self.__documents_dict[index] = list() 340 | 341 | version = 1 342 | 343 | result = 'created' 344 | if id is None: 345 | id = get_random_id() 346 | 347 | elif self.exists(index, id, doc_type=doc_type, params=params): 348 | doc = self.get(index, id, doc_type=doc_type, params=params) 349 | version = doc['_version'] + 1 350 | self.delete(index, id, doc_type=doc_type) 351 | result = 'updated' 352 | 353 | self.__documents_dict[index].append({ 354 | '_type': doc_type, 355 | '_id': id, 356 | '_source': body, 357 | '_index': index, 358 | '_version': version 359 | }) 360 | 361 | return { 362 | '_type': doc_type, 363 | '_id': id, 364 | 'created': True, 365 | '_version': version, 366 | '_index': index, 367 | 'result': result 368 | } 369 | 370 | @query_params('consistency', 'op_type', 'parent', 'refresh', 'replication', 371 | 'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type') 372 | def bulk(self, body, index=None, doc_type=None, params=None, headers=None): 373 | items = [] 374 | errors = False 375 | 376 | for raw_line in body.splitlines(): 377 | if len(raw_line.strip()) > 0: 378 | line = json.loads(raw_line) 379 | 380 | if any(action in line for action in ['index', 'create', 'update', 'delete']): 381 | action = next(iter(line.keys())) 382 | 383 | version = 1 384 | index = line[action].get('_index') or index 385 | doc_type = line[action].get('_type', "_doc") # _type is deprecated in 7.x 386 | 387 | if action in ['delete', 'update'] and not line[action].get("_id"): 388 | raise RequestError(400, 'action_request_validation_exception', 'missing id') 389 | 390 | document_id = line[action].get('_id', get_random_id()) 391 | 392 | if action == 'delete': 393 | status, result, error = self._validate_action( 394 | action, index, document_id, doc_type, params=params 395 | ) 396 | item = {action: { 397 | '_type': doc_type, 398 | '_id': document_id, 399 | '_index': index, 400 | '_version': version, 401 | 'status': status, 402 | }} 403 | if error: 404 | errors = True 405 | item[action]["error"] = result 406 | else: 407 | self.delete(index, document_id, doc_type=doc_type, params=params) 408 | item[action]["result"] = result 409 | items.append(item) 410 | 411 | if index not in self.__documents_dict: 412 | self.__documents_dict[index] = list() 413 | else: 414 | if 'doc' in line and action == 'update': 415 | source = line['doc'] 416 | else: 417 | source = line 418 | status, result, error = self._validate_action( 419 | action, index, document_id, doc_type, params=params 420 | ) 421 | item = { 422 | action: { 423 | '_type': doc_type, 424 | '_id': document_id, 425 | '_index': index, 426 | '_version': version, 427 | 'status': status, 428 | } 429 | } 430 | if not error: 431 | item[action]["result"] = result 432 | if self.exists(index, document_id, doc_type=doc_type, params=params): 433 | doc = self.get(index, document_id, doc_type=doc_type, params=params) 434 | version = doc['_version'] + 1 435 | self.delete(index, document_id, doc_type=doc_type, params=params) 436 | 437 | self.__documents_dict[index].append({ 438 | '_type': doc_type, 439 | '_id': 
document_id, 440 | '_source': source, 441 | '_index': index, 442 | '_version': version 443 | }) 444 | else: 445 | errors = True 446 | item[action]["error"] = result 447 | items.append(item) 448 | return { 449 | 'errors': errors, 450 | 'items': items 451 | } 452 | 453 | def _validate_action(self, action, index, document_id, doc_type, params=None): 454 | if action in ['index', 'update'] and self.exists(index, id=document_id, doc_type=doc_type, params=params): 455 | return 200, 'updated', False 456 | if action == 'create' and self.exists(index, id=document_id, doc_type=doc_type, params=params): 457 | return 409, 'version_conflict_engine_exception', True 458 | elif action in ['index', 'create'] and not self.exists(index, id=document_id, doc_type=doc_type, params=params): 459 | return 201, 'created', False 460 | elif action == "delete" and self.exists(index, id=document_id, doc_type=doc_type, params=params): 461 | return 200, 'deleted', False 462 | elif action == 'update' and not self.exists(index, id=document_id, doc_type=doc_type, params=params): 463 | return 404, 'document_missing_exception', True 464 | elif action == 'delete' and not self.exists(index, id=document_id, doc_type=doc_type, params=params): 465 | return 404, 'not_found', True 466 | else: 467 | raise NotImplementedError(f"{action} behaviour hasn't been implemented") 468 | 469 | @query_params('parent', 'preference', 'realtime', 'refresh', 'routing') 470 | def exists(self, index, id, doc_type=None, params=None, headers=None): 471 | result = False 472 | if index in self.__documents_dict: 473 | for document in self.__documents_dict[index]: 474 | if document.get('_id') == id and document.get('_type') == doc_type: 475 | result = True 476 | break 477 | return result 478 | 479 | @query_params('_source', '_source_exclude', '_source_include', 'fields', 480 | 'parent', 'preference', 'realtime', 'refresh', 'routing', 'version', 481 | 'version_type') 482 | def get(self, index, id, doc_type='_all', params=None, headers=None): 483 | ignore = extract_ignore_as_iterable(params) 484 | result = None 485 | 486 | if index in self.__documents_dict: 487 | for document in self.__documents_dict[index]: 488 | if document.get('_id') == id: 489 | if doc_type == '_all': 490 | result = document 491 | break 492 | else: 493 | if document.get('_type') == doc_type: 494 | result = document 495 | break 496 | 497 | if result: 498 | result['found'] = True 499 | return result 500 | elif params and 404 in ignore: 501 | return {'found': False} 502 | else: 503 | error_data = { 504 | '_index': index, 505 | '_type': doc_type, 506 | '_id': id, 507 | 'found': False 508 | } 509 | raise NotFoundError(404, json.dumps(error_data)) 510 | 511 | @query_params( 512 | "_source", 513 | "_source_excludes", 514 | "_source_includes", 515 | "allow_no_indices", 516 | "analyze_wildcard", 517 | "analyzer", 518 | "conflicts", 519 | "default_operator", 520 | "df", 521 | "expand_wildcards", 522 | "from_", 523 | "ignore_unavailable", 524 | "lenient", 525 | "max_docs", 526 | "pipeline", 527 | "preference", 528 | "q", 529 | "refresh", 530 | "request_cache", 531 | "requests_per_second", 532 | "routing", 533 | "scroll", 534 | "scroll_size", 535 | "search_timeout", 536 | "search_type", 537 | "size", 538 | "slices", 539 | "sort", 540 | "stats", 541 | "terminate_after", 542 | "timeout", 543 | "version", 544 | "version_type", 545 | "wait_for_active_shards", 546 | "wait_for_completion", 547 | ) 548 | def update_by_query( 549 | self, index, body=None, doc_type=None, params=None, headers=None 550 | ): 551 
| # Actually it only supports script equal operations 552 | # TODO: Full support from painless language 553 | total_updated = 0 554 | if isinstance(index, list): 555 | index, = index 556 | new_values = {} 557 | script_params = body['script']['params'] 558 | script_source = body['script']['source'] \ 559 | .replace('ctx._source.', '') \ 560 | .split(';') 561 | for sentence in script_source: 562 | if sentence: 563 | field, _, value = sentence.split() 564 | if value.startswith('params.'): 565 | _, key = value.split('.') 566 | value = script_params.get(key) 567 | new_values[field] = value 568 | 569 | matches = self.search(index=index, doc_type=doc_type, body=body, 570 | params=params, headers=headers) 571 | if matches['hits']['total']: 572 | for hit in matches['hits']['hits']: 573 | body = hit['_source'] 574 | body.update(new_values) 575 | self.index(index, body, doc_type=hit['_type'], id=hit['_id']) 576 | total_updated += 1 577 | 578 | return { 579 | 'took': 1, 580 | 'time_out': False, 581 | 'total': matches['hits']['total'], 582 | 'updated': total_updated, 583 | 'deleted': 0, 584 | 'batches': 1, 585 | 'version_conflicts': 0, 586 | 'noops': 0, 587 | 'retries': 0, 588 | 'throttled_millis': 100, 589 | 'requests_per_second': 100, 590 | 'throttled_until_millis': 0, 591 | 'failures': [] 592 | } 593 | 594 | 595 | @query_params('_source', '_source_exclude', '_source_include', 596 | 'preference', 'realtime', 'refresh', 'routing', 597 | 'stored_fields') 598 | def mget(self, body, index, doc_type='_all', params=None, headers=None): 599 | docs = body.get('docs') 600 | ids = [doc['_id'] for doc in docs] 601 | results = [] 602 | for id in ids: 603 | try: 604 | results.append(self.get(index, id, doc_type=doc_type, 605 | params=params, headers=headers)) 606 | except: 607 | pass 608 | if not results: 609 | raise RequestError( 610 | 400, 611 | 'action_request_validation_exception', 612 | 'Validation Failed: 1: no documents to get;' 613 | ) 614 | return {'docs': results} 615 | 616 | @query_params('_source', '_source_exclude', '_source_include', 'parent', 617 | 'preference', 'realtime', 'refresh', 'routing', 'version', 618 | 'version_type') 619 | def get_source(self, index, doc_type, id, params=None, headers=None): 620 | document = self.get(index=index, doc_type=doc_type, id=id, params=params) 621 | return document.get('_source') 622 | 623 | @query_params('_source', '_source_exclude', '_source_include', 624 | 'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator', 625 | 'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields', 626 | 'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms', 627 | 'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type', 628 | 'size', 'sort', 'stats', 'suggest_field', 'suggest_mode', 629 | 'suggest_size', 'suggest_text', 'terminate_after', 'timeout', 630 | 'track_scores', 'version') 631 | def count(self, index=None, doc_type=None, body=None, params=None, headers=None): 632 | searchable_indexes = self._normalize_index_to_list(index) 633 | 634 | i = 0 635 | for searchable_index in searchable_indexes: 636 | for document in self.__documents_dict[searchable_index]: 637 | if doc_type and document.get('_type') != doc_type: 638 | continue 639 | i += 1 640 | result = { 641 | 'count': i, 642 | '_shards': { 643 | 'successful': 1, 644 | 'skipped': 0, 645 | 'failed': 0, 646 | 'total': 1 647 | } 648 | } 649 | 650 | return result 651 | 652 | def _get_fake_query_condition(self, query_type_str, condition): 653 | return 
FakeQueryCondition(QueryType.get_query_type(query_type_str), condition) 654 | 655 | @query_params( 656 | "ccs_minimize_roundtrips", 657 | "max_concurrent_searches", 658 | "max_concurrent_shard_requests", 659 | "pre_filter_shard_size", 660 | "rest_total_hits_as_int", 661 | "search_type", 662 | "typed_keys", 663 | ) 664 | def msearch(self, body, index=None, doc_type=None, params=None, headers=None): 665 | def grouped(iterable): 666 | if len(iterable) % 2 != 0: 667 | raise Exception('Malformed body') 668 | iterator = iter(iterable) 669 | while True: 670 | try: 671 | yield (next(iterator)['index'], next(iterator)) 672 | except StopIteration: 673 | break 674 | 675 | responses = [] 676 | took = 0 677 | for ind, query in grouped(body): 678 | response = self.search(index=ind, body=query) 679 | took += response['took'] 680 | responses.append(response) 681 | result = { 682 | 'took': took, 683 | 'responses': responses 684 | } 685 | return result 686 | 687 | @query_params('_source', '_source_exclude', '_source_include', 688 | 'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator', 689 | 'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields', 690 | 'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms', 691 | 'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type', 692 | 'size', 'sort', 'stats', 'suggest_field', 'suggest_mode', 693 | 'suggest_size', 'suggest_text', 'terminate_after', 'timeout', 694 | 'track_scores', 'version') 695 | def search(self, index=None, doc_type=None, body=None, params=None, headers=None): 696 | searchable_indexes = self._normalize_index_to_list(index) 697 | 698 | matches = [] 699 | conditions = [] 700 | 701 | if body and 'query' in body: 702 | query = body['query'] 703 | for query_type_str, condition in query.items(): 704 | conditions.append(self._get_fake_query_condition(query_type_str, condition)) 705 | for searchable_index in searchable_indexes: 706 | 707 | for document in self.__documents_dict[searchable_index]: 708 | 709 | if doc_type: 710 | if isinstance(doc_type, list) and document.get('_type') not in doc_type: 711 | continue 712 | if isinstance(doc_type, str) and document.get('_type') != doc_type: 713 | continue 714 | if conditions: 715 | for condition in conditions: 716 | if condition.evaluate(document): 717 | matches.append(document) 718 | break 719 | else: 720 | matches.append(document) 721 | 722 | for match in matches: 723 | self._find_and_convert_data_types(match['_source']) 724 | 725 | result = { 726 | 'hits': { 727 | 'total': {'value': len(matches), 'relation': 'eq'}, 728 | 'max_score': 1.0 729 | }, 730 | '_shards': { 731 | # Simulate indexes with 1 shard each 732 | 'successful': len(searchable_indexes), 733 | 'skipped': 0, 734 | 'failed': 0, 735 | 'total': len(searchable_indexes) 736 | }, 737 | 'took': 1, 738 | 'timed_out': False 739 | } 740 | 741 | hits = [] 742 | for match in matches: 743 | match['_score'] = 1.0 744 | hits.append(match) 745 | 746 | # build aggregations 747 | if body is not None and 'aggs' in body: 748 | aggregations = {} 749 | 750 | for aggregation, definition in body['aggs'].items(): 751 | aggregations[aggregation] = { 752 | "doc_count_error_upper_bound": 0, 753 | "sum_other_doc_count": 0, 754 | "buckets": self.make_aggregation_buckets(definition, matches) 755 | } 756 | 757 | if aggregations: 758 | result['aggregations'] = aggregations 759 | 760 | if 'scroll' in params: 761 | result['_scroll_id'] = str(get_random_scroll_id()) 762 | params['size'] = int(params.get('size', 10)) 763 | 
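            # Scroll bookkeeping: 'from' starts at 0 and is advanced by 'size'
            # on each replay, so the state stashed below yields the next page
            # when scroll() re-runs this search.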
params['from'] = int(params.get('from') + params.get('size') if 'from' in params else 0) 764 | self.__scrolls[result.get('_scroll_id')] = { 765 | 'index': index, 766 | 'doc_type': doc_type, 767 | 'body': body, 768 | 'params': params 769 | } 770 | hits = hits[params.get('from'):params.get('from') + params.get('size')] 771 | elif 'size' in params: 772 | hits = hits[:int(params['size'])] 773 | elif body and 'size' in body: 774 | hits = hits[:int(body['size'])] 775 | 776 | result['hits']['hits'] = hits 777 | 778 | return result 779 | 780 | @query_params('scroll') 781 | def scroll(self, scroll_id, params=None, headers=None): 782 | scroll = self.__scrolls.pop(scroll_id) 783 | result = self.search( 784 | index=scroll.get('index'), 785 | doc_type=scroll.get('doc_type'), 786 | body=scroll.get('body'), 787 | params=scroll.get('params') 788 | ) 789 | return result 790 | 791 | @query_params('consistency', 'parent', 'refresh', 'replication', 'routing', 792 | 'timeout', 'version', 'version_type') 793 | def delete(self, index, id, doc_type=None, params=None, headers=None): 794 | 795 | found = False 796 | ignore = extract_ignore_as_iterable(params) 797 | 798 | if index in self.__documents_dict: 799 | for document in self.__documents_dict[index]: 800 | if document.get('_id') == id: 801 | found = True 802 | if doc_type and document.get('_type') != doc_type: 803 | found = False 804 | if found: 805 | self.__documents_dict[index].remove(document) 806 | break 807 | 808 | result_dict = { 809 | 'found': found, 810 | '_index': index, 811 | '_type': doc_type, 812 | '_id': id, 813 | '_version': 1, 814 | } 815 | 816 | if found: 817 | return result_dict 818 | elif params and 404 in ignore: 819 | return {'found': False} 820 | else: 821 | raise NotFoundError(404, json.dumps(result_dict)) 822 | 823 | @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 824 | 'preference', 'routing') 825 | def suggest(self, body, index=None, params=None, headers=None): 826 | if index is not None and index not in self.__documents_dict: 827 | raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index)) 828 | 829 | result_dict = {} 830 | for key, value in body.items(): 831 | text = value.get('text') 832 | suggestion = int(text) + 1 if isinstance(text, int) else '{0}_suggestion'.format(text) 833 | result_dict[key] = [ 834 | { 835 | 'text': text, 836 | 'length': 1, 837 | 'options': [ 838 | { 839 | 'text': suggestion, 840 | 'freq': 1, 841 | 'score': 1.0 842 | } 843 | ], 844 | 'offset': 0 845 | } 846 | ] 847 | return result_dict 848 | 849 | def _normalize_index_to_list(self, index): 850 | # Ensure to have a list of index 851 | if index is None: 852 | searchable_indexes = self.__documents_dict.keys() 853 | elif isinstance(index, str) or isinstance(index, unicode): 854 | searchable_indexes = [index] 855 | elif isinstance(index, list): 856 | searchable_indexes = index 857 | else: 858 | # Is it the correct exception to use ? 
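        # ValueError seems defensible here: an 'index' that is neither a
        # string nor a list is a caller error rather than a missing resource.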
859 | raise ValueError("Invalid param 'index'") 860 | 861 | # Check index(es) exists 862 | for searchable_index in searchable_indexes: 863 | if searchable_index not in self.__documents_dict: 864 | raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(searchable_index)) 865 | 866 | return searchable_indexes 867 | 868 | @classmethod 869 | def _find_and_convert_data_types(cls, document): 870 | for key, value in document.items(): 871 | if isinstance(value, dict): 872 | cls._find_and_convert_data_types(value) 873 | elif isinstance(value, datetime.datetime): 874 | document[key] = value.isoformat() 875 | 876 | def make_aggregation_buckets(self, aggregation, documents): 877 | if 'composite' in aggregation: 878 | return self.make_composite_aggregation_buckets(aggregation, documents) 879 | return [] 880 | 881 | def make_composite_aggregation_buckets(self, aggregation, documents): 882 | 883 | def make_key(doc_source, agg_source): 884 | attr = list(agg_source.values())[0]["terms"]["field"] 885 | return doc_source[attr] 886 | 887 | def make_bucket(bucket_key, bucket): 888 | out = { 889 | "key": {k: v for k, v in zip(bucket_key_fields, bucket_key)}, 890 | "doc_count": len(bucket), 891 | } 892 | 893 | for metric_key, metric_definition in aggregation["aggs"].items(): 894 | metric_type_str = list(metric_definition)[0] 895 | metric_type = MetricType.get_metric_type(metric_type_str) 896 | attr = metric_definition[metric_type_str]["field"] 897 | data = [doc[attr] for doc in bucket] 898 | 899 | if metric_type == MetricType.CARDINALITY: 900 | value = len(set(data)) 901 | else: 902 | raise NotImplementedError(f"Metric type '{metric_type}' not implemented") 903 | 904 | out[metric_key] = {"value": value} 905 | return out 906 | 907 | agg_sources = aggregation["composite"]["sources"] 908 | buckets = defaultdict(list) 909 | bucket_key_fields = [list(src)[0] for src in agg_sources] 910 | for document in documents: 911 | doc_src = document["_source"] 912 | key = tuple(make_key(doc_src, agg_src) for agg_src in aggregation["composite"]["sources"]) 913 | buckets[key].append(doc_src) 914 | 915 | buckets = sorted(((k, v) for k, v in buckets.items()), key=lambda x: x[0]) 916 | buckets = [make_bucket(bucket_key, bucket) for bucket_key, bucket in buckets] 917 | return buckets 918 | -------------------------------------------------------------------------------- /elasticmock/fake_indices.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticsearch.client.indices import IndicesClient 4 | from elasticsearch.client.utils import query_params 5 | 6 | 7 | class FakeIndicesClient(IndicesClient): 8 | 9 | @query_params('master_timeout', 'timeout') 10 | def create(self, index, body=None, params=None, headers=None, *args, **kwargs): 11 | documents_dict = self.__get_documents_dict() 12 | if index not in documents_dict: 13 | documents_dict[index] = [] 14 | 15 | @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', 16 | 'local') 17 | def exists(self, index, params=None, headers=None): 18 | return index in self.__get_documents_dict() 19 | 20 | @query_params('allow_no_indices', 'expand_wildcards', 'force', 21 | 'ignore_unavailable', 'operation_threading') 22 | def refresh(self, index=None, params=None, headers=None): 23 | pass 24 | 25 | @query_params('master_timeout', 'timeout') 26 | def delete(self, index, params=None, headers=None): 27 | documents_dict = self.__get_documents_dict() 28 | if index in documents_dict: 29 | del 
documents_dict[index] 30 | 31 | def __get_documents_dict(self): 32 | return self.client._FakeElasticsearch__documents_dict 33 | -------------------------------------------------------------------------------- /elasticmock/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import base64 4 | import random 5 | import string 6 | 7 | DEFAULT_ELASTICSEARCH_ID_SIZE = 20 8 | CHARSET_FOR_ELASTICSEARCH_ID = string.ascii_letters + string.digits 9 | 10 | DEFAULT_ELASTICSEARCH_SEARCHRESULTPHASE_COUNT = 6 11 | 12 | 13 | def get_random_id(size=DEFAULT_ELASTICSEARCH_ID_SIZE): 14 | return ''.join(random.choice(CHARSET_FOR_ELASTICSEARCH_ID) for _ in range(size)) 15 | 16 | 17 | def get_random_scroll_id(size=DEFAULT_ELASTICSEARCH_SEARCHRESULTPHASE_COUNT): 18 | return base64.b64encode(''.join(get_random_id() for _ in range(size)).encode()) 19 | 20 | 21 | def extract_ignore_as_iterable(params): 22 | """Extracts the value of the ignore parameter as an iterable""" 23 | ignore = params.get('ignore', ()) 24 | if isinstance(ignore, int): 25 | ignore = (ignore,) 26 | return ignore 27 | -------------------------------------------------------------------------------- /elasticmock/utilities/decorator.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | 4 | def for_all_methods(decorators, apply_on_public_only=True): 5 | def decorate(cls): 6 | for attr in cls.__dict__: 7 | 8 | if apply_on_public_only: 9 | if attr.startswith('_'): 10 | continue 11 | 12 | if callable(getattr(cls, attr)): 13 | for decorator in decorators: 14 | setattr(cls, attr, decorator(getattr(cls, attr))) 15 | return cls 16 | return decorate 17 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | elasticsearch>=1.9.0,<8.0.0 2 | ipdb 3 | python-dateutil -------------------------------------------------------------------------------- /requirements_test.txt: -------------------------------------------------------------------------------- 1 | tox 2 | parameterized -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | python_files=test*.py 3 | 4 | [coverage:run] 5 | omit = 6 | */tests/* -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import setuptools 4 | 5 | __version__ = '1.8.1' 6 | 7 | # read the contents of the README file 8 | from os import path 9 | this_directory = path.abspath(path.dirname(__file__)) 10 | with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: 11 | long_description = f.read() 12 | 13 | setuptools.setup( 14 | name='ElasticMock', 15 | version=__version__, 16 | author='Marcos Cardoso', 17 | author_email='vrcmarcos@gmail.com', 18 | description='Python Elasticsearch Mock for test purposes', 19 | long_description=long_description, 20 | long_description_content_type='text/markdown', 21 | url='https://github.com/vrcmarcos/elasticmock', 22 | packages=setuptools.find_packages(exclude=('tests',)), 23 | install_requires=[ 24 | 'elasticsearch<8.0.0', 25 | 'python-dateutil', 26 | ], 27 | classifiers=[ 28 | 'Environment :: Web Environment', 29 |
'Intended Audience :: Developers', 30 | 'Operating System :: OS Independent', 31 | 'Programming Language :: Python', 32 | 'Programming Language :: Python :: 3', 33 | 'Programming Language :: Python :: 3.6', 34 | 'Programming Language :: Python :: 3.7', 35 | 'Programming Language :: Python :: 3.8', 36 | 'Programming Language :: Python :: 3.9', 37 | 'License :: OSI Approved :: MIT License', 38 | 'Topic :: Software Development :: Libraries :: Python Modules' 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import unittest 4 | from datetime import datetime 5 | 6 | import elasticsearch 7 | 8 | from elasticmock import elasticmock 9 | 10 | INDEX_NAME = 'test_index' 11 | DOC_TYPE = 'doc-Type' 12 | DOC_ID = 'doc-id' 13 | BODY = { 14 | 'author': 'kimchy', 15 | 'text': 'Elasticsearch: cool. bonsai cool.', 16 | 'timestamp': datetime.now(), 17 | } 18 | 19 | 20 | class TestElasticmock(unittest.TestCase): 21 | 22 | @elasticmock 23 | def setUp(self): 24 | self.es = elasticsearch.Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}]) 25 | -------------------------------------------------------------------------------- /tests/fake_cluster/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /tests/fake_cluster/test_health.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock 4 | 5 | 6 | class TestHealth(TestElasticmock): 7 | 8 | def test_should_return_health(self): 9 | health_status = self.es.cluster.health() 10 | 11 | expected_health_status = { 12 | 'cluster_name': 'testcluster', 13 | 'status': 'green', 14 | 'timed_out': False, 15 | 'number_of_nodes': 1, 16 | 'number_of_data_nodes': 1, 17 | 'active_primary_shards': 1, 18 | 'active_shards': 1, 19 | 'relocating_shards': 0, 20 | 'initializing_shards': 0, 21 | 'unassigned_shards': 1, 22 | 'delayed_unassigned_shards': 0, 23 | 'number_of_pending_tasks': 0, 24 | 'number_of_in_flight_fetch': 0, 25 | 'task_max_waiting_in_queue_millis': 0, 26 | 'active_shards_percent_as_number': 50.0 27 | } 28 | 29 | self.assertDictEqual(expected_health_status, health_status) 30 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/behaviour/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticmock import behaviour 4 | from tests import TestElasticmock 5 | 6 | 7 | class TestElasticmockBehaviour(TestElasticmock): 8 | 9 | def tearDown(self): 10 | behaviour.disable_all() 11 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/behaviour/test_server_failure.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticmock import behaviour 4 | from tests.fake_elasticsearch.behaviour import TestElasticmockBehaviour 5 | from tests import INDEX_NAME, 
DOC_TYPE, BODY 6 | 7 | 8 | class TestBehaviourServerFailure(TestElasticmockBehaviour): 9 | 10 | def test_should_return_internal_server_error_when_simulate_server_error_is_true(self): 11 | behaviour.server_failure.enable() 12 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 13 | 14 | expected = { 15 | 'status_code': 500, 16 | 'error': 'Internal Server Error' 17 | } 18 | 19 | self.assertDictEqual(expected, data) 20 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_bulk.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import json 4 | 5 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY, DOC_ID 6 | 7 | 8 | class TestBulk(TestElasticmock): 9 | 10 | def test_should_bulk_index_documents_index_creates(self): 11 | action = {'index': {'_index': INDEX_NAME, '_type': DOC_TYPE}} 12 | action_json = json.dumps(action) 13 | body_json = json.dumps(BODY, default=str) 14 | num_of_documents = 10 15 | 16 | lines = [] 17 | for count in range(0, num_of_documents): 18 | lines.append(action_json) 19 | lines.append(body_json) 20 | body = '\n'.join(lines) 21 | 22 | data = self.es.bulk(body=body) 23 | items = data.get('items') 24 | 25 | self.assertFalse(data.get('errors')) 26 | self.assertEqual(num_of_documents, len(items)) 27 | 28 | for item in items: 29 | index = item.get('index') 30 | self.assertEqual(DOC_TYPE, index.get('_type')) 31 | self.assertEqual(INDEX_NAME, index.get('_index')) 32 | self.assertEqual('created', index.get('result')) 33 | self.assertEqual(201, index.get('status')) 34 | 35 | def test_should_bulk_index_documents_create_creates(self): 36 | create_action = {'create': {'_index': INDEX_NAME, '_type': DOC_TYPE}} 37 | create_with_id = {'create': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': DOC_ID}} 38 | actions = [ 39 | json.dumps(create_action), 40 | json.dumps(BODY, default=str), 41 | json.dumps(create_action), 42 | json.dumps(BODY, default=str), 43 | json.dumps(create_with_id), 44 | json.dumps(BODY, default=str), 45 | # Will fail: a document with the same ID has already been created 46 | json.dumps(create_with_id), 47 | json.dumps(BODY, default=str), 48 | ] 49 | body = '\n'.join(actions) 50 | 51 | data = self.es.bulk(body=body) 52 | 53 | items = data.get('items') 54 | 55 | self.assertTrue(data.get('errors')) 56 | self.assertEqual(4, len(items)) 57 | 58 | last_item = items.pop() 59 | self.assertEqual(last_item['create']['error'], 'version_conflict_engine_exception') 60 | self.assertEqual(last_item['create']['status'], 409) 61 | for item in items: 62 | index = item.get('create') 63 | self.assertEqual(DOC_TYPE, index.get('_type')) 64 | self.assertEqual(INDEX_NAME, index.get('_index')) 65 | self.assertEqual('created', index.get('result')) 66 | self.assertEqual(201, index.get('status')) 67 | 68 | def test_should_bulk_index_documents_index_updates(self): 69 | action = {'index': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}} 70 | action_json = json.dumps(action) 71 | body_json = json.dumps(BODY, default=str) 72 | num_of_documents = 10 73 | 74 | lines = [] 75 | for count in range(0, num_of_documents): 76 | lines.append(action_json) 77 | lines.append(body_json) 78 | body = '\n'.join(lines) 79 | 80 | data = self.es.bulk(body=body) 81 | items = data.get('items') 82 | 83 | self.assertFalse(data.get('errors')) 84 | self.assertEqual(num_of_documents, len(items)) 85 | 86 | first_item = items.pop(0) 87 | 
self.assertEqual(first_item["index"]["status"], 201) 88 | self.assertEqual(first_item["index"]["result"], "created") 89 | 90 | for item in items: 91 | index = item.get('index') 92 | self.assertEqual(DOC_TYPE, index.get('_type')) 93 | self.assertEqual(INDEX_NAME, index.get('_index')) 94 | self.assertEqual('updated', index.get('result')) 95 | self.assertEqual(200, index.get('status')) 96 | 97 | def test_should_bulk_index_documents_update_updates(self): 98 | action = {'update': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}} 99 | action_json = json.dumps(action) 100 | create_action_json = json.dumps( 101 | {'create': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}} 102 | ) 103 | body_json = json.dumps({'doc': BODY}, default=str) 104 | num_of_documents = 4 105 | 106 | lines = [create_action_json, json.dumps(BODY, default=str)] 107 | for count in range(0, num_of_documents): 108 | lines.append(action_json) 109 | lines.append(body_json) 110 | body = '\n'.join(lines) 111 | 112 | data = self.es.bulk(body=body) 113 | items = data.get('items') 114 | 115 | self.assertFalse(data.get('errors')) 116 | self.assertEqual(num_of_documents + 1, len(items)) 117 | 118 | first_item = items.pop(0) 119 | self.assertEqual(first_item["create"]["status"], 201) 120 | self.assertEqual(first_item["create"]["result"], "created") 121 | 122 | for item in items: 123 | index = item.get('update') 124 | self.assertEqual(DOC_TYPE, index.get('_type')) 125 | self.assertEqual(INDEX_NAME, index.get('_index')) 126 | self.assertEqual('updated', index.get('result')) 127 | self.assertEqual(200, index.get('status')) 128 | 129 | def test_should_bulk_index_documents_delete_deletes(self): 130 | delete_action = {'delete': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}} 131 | delete_action_json = json.dumps(delete_action) 132 | create_action_json = json.dumps( 133 | {'create': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}} 134 | ) 135 | 136 | lines = [ 137 | create_action_json, 138 | json.dumps(BODY, default=str), 139 | delete_action_json, 140 | ] 141 | body = '\n'.join(lines) 142 | 143 | data = self.es.bulk(body=body) 144 | items = data.get('items') 145 | 146 | self.assertFalse(data.get('errors')) 147 | self.assertEqual(2, len(items)) 148 | 149 | first_item = items.pop(0) 150 | self.assertEqual(first_item["create"]["status"], 201) 151 | self.assertEqual(first_item["create"]["result"], "created") 152 | self.assertEqual(first_item["create"]['_type'], DOC_TYPE) 153 | self.assertEqual(first_item["create"]['_id'], DOC_ID) 154 | 155 | second_item = items.pop(0) 156 | self.assertEqual(second_item["delete"]["status"], 200) 157 | self.assertEqual(second_item["delete"]["result"], "deleted") 158 | self.assertEqual(second_item["delete"]['_type'], DOC_TYPE) 159 | self.assertEqual(second_item["delete"]['_id'], DOC_ID) 160 | 161 | def test_should_bulk_index_documents_mixed_actions(self): 162 | doc_body = json.dumps(BODY, default=str) 163 | 164 | doc_id_1 = 1 165 | doc_id_2 = 2 166 | actions = [ 167 | json.dumps({'create': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}), 168 | doc_body, # 201 169 | json.dumps({'create': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}), 170 | doc_body, # 409 171 | json.dumps({'index': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_2}}), 172 | doc_body, # 201 173 | json.dumps({'index': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_2}}), 174 | doc_body, # 200 175 | json.dumps({'update': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': 
doc_id_1}}), 176 | doc_body, # 200 177 | json.dumps({'delete': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}), 178 | # 200 179 | json.dumps({'update': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}), 180 | doc_body, # 404 181 | json.dumps({'delete': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}), 182 | # 404 183 | ] 184 | body = '\n'.join(actions) 185 | 186 | data = self.es.bulk(body=body) 187 | 188 | expected = [ 189 | {'create': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME, 190 | '_version': 1, 'status': 201, 'result': 'created'}}, 191 | {'create': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME, 192 | '_version': 1, 'status': 409, 193 | 'error': 'version_conflict_engine_exception'}}, 194 | {'index': {'_type': DOC_TYPE, '_id': 2, '_index': INDEX_NAME, 195 | '_version': 1, 'status': 201, 'result': 'created'}}, 196 | {'index': {'_type': DOC_TYPE, '_id': 2, '_index': INDEX_NAME, 197 | '_version': 1, 'status': 200, 'result': 'updated'}}, 198 | {'update': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME, 199 | '_version': 1, 'status': 200, 'result': 'updated'}}, 200 | {'delete': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME, 201 | '_version': 1, 'result': 'deleted', 'status': 200}}, 202 | {'update': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME, 203 | '_version': 1, 'status': 404, 'error': 'document_missing_exception'}}, 204 | {'delete': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME, 205 | '_version': 1, 'error': 'not_found', 'status': 404}}, 206 | ] 207 | 208 | actual = data.get('items') 209 | 210 | self.assertTrue(data.get('errors')) 211 | self.assertEqual(actual, expected) 212 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_count.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, DOC_TYPE 4 | 5 | 6 | class TestCount(TestElasticmock): 7 | 8 | def test_should_return_count_for_indexed_documents_on_index(self): 9 | index_quantity = 0 10 | for i in range(0, index_quantity): 11 | self.es.index(index='index_{0}'.format(i), doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 12 | 13 | count = self.es.count() 14 | self.assertEqual(index_quantity, count.get('count')) 15 | 16 | def test_should_count_in_multiple_indexes(self): 17 | self.es.index(index='groups', doc_type='groups', body={'budget': 1000}) 18 | self.es.index(index='users', doc_type='users', body={'name': 'toto'}) 19 | self.es.index(index='pcs', doc_type='pcs', body={'model': 'macbook'}) 20 | 21 | result = self.es.count(index=['users', 'pcs']) 22 | self.assertEqual(2, result.get('count')) 23 | 24 | def test_should_count_with_empty_doc_types(self): 25 | self.es.index(index='index', doc_type=DOC_TYPE, body={'data': 'test'}) 26 | count = self.es.count(doc_type=[]) 27 | self.assertEqual(1, count.get('count')) 28 | 29 | def test_should_return_skipped_shards(self): 30 | self.es.index(index='index', doc_type=DOC_TYPE, body={'data': 'test'}) 31 | count = self.es.count(doc_type=[]) 32 | self.assertEqual(0, count.get('_shards').get('skipped')) 33 | 34 | def test_should_count_with_doc_types(self): 35 | self.es.index(index='index', doc_type=DOC_TYPE, body={'data': 'test1'}) 36 | self.es.index(index='index', doc_type='different-doc-type', body={'data': 'test2'}) 37 | count = self.es.count(doc_type=DOC_TYPE) 38 | self.assertEqual(1, count.get('count')) 39 | 
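Note: the bulk tests above hand-assemble Elasticsearch's NDJSON bulk format, in which every action line is immediately followed by the corresponding document source on the next line, and the whole payload is newline-joined. The sketch below shows a small helper that builds such a body; build_bulk_body is a hypothetical name used only for illustration and is not part of this repo.

import json

def build_bulk_body(index, doc_type, documents):
    # Interleave one action line and one source line per document,
    # exactly as the tests above do, then join with newlines.
    lines = []
    for doc in documents:
        lines.append(json.dumps({'index': {'_index': index, '_type': doc_type}}))
        lines.append(json.dumps(doc, default=str))
    return '\n'.join(lines)

# Usage against a (mocked) client, mirroring the assertions in test_bulk.py:
# data = es.bulk(body=build_bulk_body('test_index', 'doc-Type', [{'author': 'kimchy'}]))
# assert not data.get('errors')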
-------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_delete.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticsearch.exceptions import NotFoundError 4 | 5 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY 6 | 7 | 8 | class TestDelete(TestElasticmock): 9 | 10 | def test_should_raise_exception_when_delete_nonindexed_document(self): 11 | with self.assertRaises(NotFoundError): 12 | self.es.delete(index=INDEX_NAME, doc_type=DOC_TYPE, id=1) 13 | 14 | def test_should_not_raise_exception_when_delete_nonindexed_document_if_ignored(self): 15 | target_doc = self.es.delete(index=INDEX_NAME, doc_type=DOC_TYPE, id=1, ignore=404) 16 | self.assertFalse(target_doc.get('found')) 17 | 18 | def test_should_not_raise_exception_when_delete_nonindexed_document_if_ignored_list(self): 19 | target_doc = self.es.delete(index=INDEX_NAME, doc_type=DOC_TYPE, id=1, ignore=(401, 404)) 20 | self.assertFalse(target_doc.get('found')) 21 | 22 | def test_should_delete_indexed_document(self): 23 | doc_indexed = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 24 | search = self.es.search(index=INDEX_NAME) 25 | self.assertEqual(1, search.get('hits').get('total').get('value')) 26 | 27 | doc_id = doc_indexed.get('_id') 28 | doc_deleted = self.es.delete(index=INDEX_NAME, doc_type=DOC_TYPE, id=doc_id) 29 | search = self.es.search(index=INDEX_NAME) 30 | self.assertEqual(0, search.get('hits').get('total').get('value')) 31 | 32 | expected_doc_deleted = { 33 | 'found': True, 34 | '_index': INDEX_NAME, 35 | '_type': DOC_TYPE, 36 | '_id': doc_id, 37 | '_version': 1, 38 | } 39 | 40 | self.assertDictEqual(expected_doc_deleted, doc_deleted) 41 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_exists.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY 4 | 5 | 6 | class TestExists(TestElasticmock): 7 | 8 | def test_should_return_exists_false_if_nonindexed_id_is_used(self): 9 | self.assertFalse(self.es.exists(index=INDEX_NAME, doc_type=DOC_TYPE, id=1)) 10 | 11 | def test_should_return_exists_true_if_indexed_id_is_used(self): 12 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 13 | document_id = data.get('_id') 14 | self.assertTrue(self.es.exists(index=INDEX_NAME, doc_type=DOC_TYPE, id=document_id)) 15 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_get.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticsearch.exceptions import NotFoundError 4 | 5 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY 6 | 7 | 8 | class TestGet(TestElasticmock): 9 | 10 | def test_should_raise_notfounderror_when_nonindexed_id_is_used(self): 11 | with self.assertRaises(NotFoundError): 12 | self.es.get(index=INDEX_NAME, id='1') 13 | 14 | def test_should_not_raise_notfounderror_when_nonindexed_id_is_used_and_ignored(self): 15 | target_doc = self.es.get(index=INDEX_NAME, id='1', ignore=404) 16 | self.assertFalse(target_doc.get('found')) 17 | 18 | def test_should_not_raise_notfounderror_when_nonindexed_id_is_used_and_ignored_list(self): 19 | target_doc = self.es.get(index=INDEX_NAME, id='1', ignore=(401, 404)) 20 | 
self.assertFalse(target_doc.get('found')) 21 | 22 | def test_should_get_document_with_id(self): 23 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 24 | 25 | document_id = data.get('_id') 26 | target_doc = self.es.get(index=INDEX_NAME, id=document_id) 27 | 28 | expected = { 29 | '_type': DOC_TYPE, 30 | '_source': BODY, 31 | '_index': INDEX_NAME, 32 | '_version': 1, 33 | 'found': True, 34 | '_id': document_id 35 | } 36 | 37 | self.assertDictEqual(expected, target_doc) 38 | 39 | def test_should_get_document_with_id_and_doc_type(self): 40 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 41 | 42 | document_id = data.get('_id') 43 | target_doc = self.es.get(index=INDEX_NAME, id=document_id, doc_type=DOC_TYPE) 44 | 45 | expected = { 46 | '_type': DOC_TYPE, 47 | '_source': BODY, 48 | '_index': INDEX_NAME, 49 | '_version': 1, 50 | 'found': True, 51 | '_id': document_id 52 | } 53 | 54 | self.assertDictEqual(expected, target_doc) 55 | 56 | def test_should_get_only_document_source_with_id(self): 57 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 58 | 59 | document_id = data.get('_id') 60 | target_doc_source = self.es.get_source(index=INDEX_NAME, doc_type=DOC_TYPE, id=document_id) 61 | 62 | self.assertEqual(target_doc_source, BODY) 63 | 64 | def test_mget_get_several_documents_by_id(self): 65 | ids = [] 66 | for _ in range(0, 10): 67 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 68 | ids.append(data.get('_id')) 69 | results = self.es.mget(index=INDEX_NAME, body={'docs': [{'_id': id} for id in ids]}) 70 | self.assertEqual(len(results['docs']), 10) 71 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_index.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY 4 | 5 | UPDATED_BODY = { 6 | 'author': 'vrcmarcos', 7 | 'text': 'Updated Text' 8 | } 9 | 10 | 11 | class TestIndex(TestElasticmock): 12 | 13 | def test_should_index_document(self): 14 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 15 | 16 | self.assertEqual(DOC_TYPE, data.get('_type')) 17 | self.assertTrue(data.get('created')) 18 | self.assertEqual(1, data.get('_version')) 19 | self.assertEqual(INDEX_NAME, data.get('_index')) 20 | self.assertEqual('created', data.get('result')) 21 | 22 | def test_should_index_document_without_doc_type(self): 23 | data = self.es.index(index=INDEX_NAME, body=BODY) 24 | 25 | self.assertEqual('_doc', data.get('_type')) 26 | self.assertTrue(data.get('created')) 27 | self.assertEqual(1, data.get('_version')) 28 | self.assertEqual(INDEX_NAME, data.get('_index')) 29 | 30 | def test_doc_type_can_be_list(self): 31 | doc_types = ['1_idx', '2_idx', '3_idx'] 32 | count_per_doc_type = 3 33 | 34 | for doc_type in doc_types: 35 | for _ in range(count_per_doc_type): 36 | self.es.index(index=INDEX_NAME, doc_type=doc_type, body={}) 37 | 38 | result = self.es.search(doc_type=[doc_types[0]]) 39 | self.assertEqual(count_per_doc_type, result.get('hits').get('total').get('value')) 40 | 41 | result = self.es.search(doc_type=doc_types[:2]) 42 | self.assertEqual(count_per_doc_type * 2, result.get('hits').get('total').get('value')) 43 | 44 | def test_update_existing_doc(self): 45 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 46 | document_id = data.get('_id') 47 | self.es.index(index=INDEX_NAME, id=document_id, 
doc_type=DOC_TYPE, body=UPDATED_BODY) 48 | target_doc = self.es.get(index=INDEX_NAME, id=document_id) 49 | 50 | expected = { 51 | '_type': DOC_TYPE, 52 | '_source': UPDATED_BODY, 53 | '_index': INDEX_NAME, 54 | '_version': 2, 55 | 'found': True, 56 | '_id': document_id 57 | } 58 | 59 | self.assertDictEqual(expected, target_doc) 60 | 61 | 62 | def test_update_by_query(self): 63 | data = self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 64 | document_id = data.get('_id') 65 | new_author = 'kimchy2' 66 | self.es.update_by_query(index=INDEX_NAME, body={ 67 | 'query': { 68 | 'match': {'author': 'kimchy'}, 69 | }, 70 | 'script': { 71 | 'source': 'ctx._source.author = params.author', 72 | 'params': { 73 | 'author': new_author 74 | } 75 | } 76 | }) 77 | target_doc = self.es.get(index=INDEX_NAME, id=document_id) 78 | self.assertEqual(target_doc['_source']['author'], new_author) -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_info.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock 4 | 5 | 6 | class TestInfo(TestElasticmock): 7 | 8 | def test_should_return_status_200_for_info(self): 9 | info = self.es.info() 10 | self.assertEqual(info.get('status'), 200) 11 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_instance.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import elasticsearch 4 | 5 | from elasticmock import elasticmock 6 | from elasticmock.fake_elasticsearch import FakeElasticsearch 7 | from tests import TestElasticmock 8 | 9 | 10 | class TestInstance(TestElasticmock): 11 | 12 | def test_should_create_fake_elasticsearch_instance(self): 13 | self.assertIsInstance(self.es, FakeElasticsearch) 14 | 15 | @elasticmock 16 | def test_should_return_same_elastic_instance_when_instantiate_more_than_one_instance_with_same_host(self): 17 | es1 = elasticsearch.Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}]) 18 | es2 = elasticsearch.Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}]) 19 | self.assertEqual(es1, es2) 20 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_ping.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock 4 | 5 | 6 | class TestPing(TestElasticmock): 7 | 8 | def test_should_return_true_when_ping(self): 9 | self.assertTrue(self.es.ping()) 10 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_scroll.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY 4 | 5 | 6 | class TestScroll(TestElasticmock): 7 | 8 | def test_scrolling(self): 9 | for _ in range(100): 10 | self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 11 | 12 | result = self.es.search(index=INDEX_NAME, params={'scroll': '1m', 'size': 30}) 13 | self.__assert_scroll(result, 30) 14 | 15 | for _ in range(2): 16 | result = self.es.scroll(scroll_id=result.get('_scroll_id'), scroll='1m') 17 | self.__assert_scroll(result, 30) 18 | 19 | result = self.es.scroll(scroll_id=result.get('_scroll_id'), scroll='1m') 20 | self.__assert_scroll(result, 10) 21 | 
22 | def __assert_scroll(self, result, expected_scroll_hits): 23 | hits = result.get('hits') 24 | 25 | self.assertNotEqual(None, result.get('_scroll_id', None)) 26 | self.assertEqual(expected_scroll_hits, len(hits.get('hits'))) 27 | self.assertEqual(100, hits.get('total').get('value')) 28 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_search.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | 4 | from elasticsearch.exceptions import NotFoundError 5 | from parameterized import parameterized 6 | 7 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE 8 | 9 | 10 | class TestSearch(TestElasticmock): 11 | 12 | def test_should_raise_notfounderror_when_search_for_unexistent_index(self): 13 | with self.assertRaises(NotFoundError): 14 | self.es.search(index=INDEX_NAME) 15 | 16 | def test_should_return_hits_hits_even_when_no_result(self): 17 | search = self.es.search() 18 | self.assertEqual(0, search.get('hits').get('total').get('value')) 19 | self.assertListEqual([], search.get('hits').get('hits')) 20 | 21 | def test_should_return_skipped_shards(self): 22 | search = self.es.search() 23 | self.assertEqual(0, search.get('_shards').get('skipped')) 24 | 25 | def test_should_return_all_documents(self): 26 | index_quantity = 10 27 | for i in range(0, index_quantity): 28 | self.es.index(index='index_{0}'.format(i), doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 29 | 30 | search = self.es.search() 31 | self.assertEqual(index_quantity, search.get('hits').get('total').get('value')) 32 | 33 | def test_should_return_all_documents_match_all(self): 34 | index_quantity = 10 35 | for i in range(0, index_quantity): 36 | self.es.index(index='index_{0}'.format(i), doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 37 | 38 | search = self.es.search(body={'query': {'match_all': {}}}) 39 | self.assertEqual(index_quantity, search.get('hits').get('total').get('value')) 40 | 41 | def test_should_return_only_indexed_documents_on_index(self): 42 | index_quantity = 2 43 | for i in range(0, index_quantity): 44 | self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 45 | 46 | search = self.es.search(index=INDEX_NAME) 47 | self.assertEqual(index_quantity, search.get('hits').get('total').get('value')) 48 | 49 | def test_should_return_only_indexed_documents_on_index_with_doc_type(self): 50 | index_quantity = 2 51 | for i in range(0, index_quantity): 52 | self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 53 | self.es.index(index=INDEX_NAME, doc_type='another-Doctype', body={'data': 'test'}) 54 | 55 | search = self.es.search(index=INDEX_NAME, doc_type=DOC_TYPE) 56 | self.assertEqual(index_quantity, search.get('hits').get('total').get('value')) 57 | 58 | def test_should_search_in_multiple_indexes(self): 59 | self.es.index(index='groups', doc_type='groups', body={'budget': 1000}) 60 | self.es.index(index='users', doc_type='users', body={'name': 'toto'}) 61 | self.es.index(index='pcs', doc_type='pcs', body={'model': 'macbook'}) 62 | 63 | result = self.es.search(index=['users', 'pcs']) 64 | self.assertEqual(2, result.get('hits').get('total').get('value')) 65 | 66 | def test_usage_of_aggregations(self): 67 | self.es.index(index='index', doc_type='document', body={'genre': 'rock'}) 68 | 69 | body = {"aggs": {"genres": {"terms": {"field": "genre"}}}} 70 | result = self.es.search(index='index', 
body=body) 71 | 72 | self.assertTrue('aggregations' in result) 73 | 74 | def test_search_with_scroll_param(self): 75 | for _ in range(100): 76 | self.es.index(index='groups', doc_type='groups', body={'budget': 1000}) 77 | 78 | result = self.es.search(index='groups', params={'scroll': '1m', 'size': 30}) 79 | self.assertNotEqual(None, result.get('_scroll_id', None)) 80 | self.assertEqual(30, len(result.get('hits').get('hits'))) 81 | self.assertEqual(100, result.get('hits').get('total').get('value')) 82 | 83 | def test_search_with_match_query(self): 84 | for i in range(0, 10): 85 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 86 | 87 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 88 | body={'query': {'match': {'data': 'TEST'}}}) 89 | self.assertEqual(response['hits']['total']['value'], 10) 90 | hits = response['hits']['hits'] 91 | self.assertEqual(len(hits), 10) 92 | 93 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, body={'query': {'match': {'data': '3'}}}) 94 | self.assertEqual(response['hits']['total']['value'], 1) 95 | hits = response['hits']['hits'] 96 | self.assertEqual(len(hits), 1) 97 | self.assertEqual(hits[0]['_source'], {'data': 'test_3'}) 98 | 99 | def test_search_with_match_query_in_int_list(self): 100 | for i in range(0, 10): 101 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'data': [i, 11, 13]}) 102 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, body={'query': {'match': {'data': 1}}}) 103 | self.assertEqual(response['hits']['total']['value'], 1) 104 | hits = response['hits']['hits'] 105 | self.assertEqual(len(hits), 1) 106 | self.assertEqual(hits[0]['_source'], {'data': [1, 11, 13]}) 107 | 108 | def test_search_with_match_query_in_string_list(self): 109 | for i in range(0, 10): 110 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'data': [str(i), 'two', 'three']}) 111 | 112 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, body={'query': {'match': {'data': '1'}}}) 113 | self.assertEqual(response['hits']['total']['value'], 1) 114 | hits = response['hits']['hits'] 115 | self.assertEqual(len(hits), 1) 116 | self.assertEqual(hits[0]['_source'], {'data': ['1', 'two', 'three']}) 117 | 118 | def test_search_with_term_query(self): 119 | for i in range(0, 10): 120 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 121 | 122 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 123 | body={'query': {'term': {'data': 'TEST'}}}) 124 | self.assertEqual(response['hits']['total']['value'], 0) 125 | hits = response['hits']['hits'] 126 | self.assertEqual(len(hits), 0) 127 | 128 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, body={'query': {'term': {'data': '3'}}}) 129 | self.assertEqual(response['hits']['total']['value'], 1) 130 | hits = response['hits']['hits'] 131 | self.assertEqual(len(hits), 1) 132 | self.assertEqual(hits[0]['_source'], {'data': 'test_3'}) 133 | 134 | def test_search_with_bool_query(self): 135 | for i in range(0, 10): 136 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'id': i}) 137 | 138 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 139 | body={'query': {'bool': {'filter': [{'term': {'id': 1}}]}}}) 140 | self.assertEqual(response['hits']['total']['value'], 1) 141 | hits = response['hits']['hits'] 142 | self.assertEqual(len(hits), 1) 143 | 144 | def 
test_search_with_must_not_query(self): 145 | for i in range(0, 10): 146 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'id': i}) 147 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 148 | body={'query': {'bool': { 149 | 'filter': [{'terms': {'id': [1, 2]}}], 150 | 'must_not': [{'term': {'id': 1}}], 151 | }}}) 152 | self.assertEqual(response['hits']['total']['value'], 1) 153 | doc = response['hits']['hits'][0]['_source'] 154 | self.assertEqual(2, doc['id']) 155 | 156 | def test_search_with_terms_query(self): 157 | for i in range(0, 10): 158 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'id': i}) 159 | 160 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 161 | body={'query': {'terms': {'id': [1, 2, 3]}}}) 162 | self.assertEqual(response['hits']['total']['value'], 3) 163 | hits = response['hits']['hits'] 164 | self.assertEqual(len(hits), 3) 165 | 166 | def test_query_on_nested_data(self): 167 | for i, y in enumerate(['yes', 'no']): 168 | self.es.index('index_for_search', doc_type=DOC_TYPE, 169 | body={'id': i, 'data': {'x': i, 'y': y}}) 170 | 171 | for term, value, i in [('data.x', 1, 1), ('data.y', 'yes', 0)]: 172 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 173 | body={'query': {'term': {term: value}}}) 174 | self.assertEqual(1, response['hits']['total']['value']) 175 | doc = response['hits']['hits'][0]['_source'] 176 | self.assertEqual(i, doc['id']) 177 | 178 | 179 | def test_search_with_bool_query_and_multi_match(self): 180 | for i in range(0, 10): 181 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={ 182 | 'data': 'test_{0}'.format(i) if i % 2 == 0 else None, 183 | 'data2': 'test_{0}'.format(i) if (i+1) % 2 == 0 else None 184 | }) 185 | 186 | search_body = { 187 | "query": { 188 | "bool": { 189 | "must": { 190 | "multi_match": { 191 | "query": "test", 192 | "fields": ["data", "data2"] 193 | } 194 | } 195 | } 196 | } 197 | } 198 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 199 | body=search_body) 200 | self.assertEqual(response['hits']['total']['value'], 10) 201 | hits = response['hits']['hits'] 202 | self.assertEqual(len(hits), 10) 203 | 204 | def test_search_bool_should_match_query(self): 205 | for i in range(0, 10): 206 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body={'data': 'test_{0}'.format(i)}) 207 | 208 | response = self.es.search(index='index_for_search', doc_type=DOC_TYPE, 209 | body={ 210 | 'query': { 211 | 'bool': { 212 | 'should': [ 213 | {'match': {'data': 'test_0'}}, 214 | {'match': {'data': 'test_1'}}, 215 | {'match': {'data': 'test_2'}}, 216 | ] 217 | } 218 | } 219 | }) 220 | self.assertEqual(response['hits']['total']['value'], 3) 221 | hits = response['hits']['hits'] 222 | self.assertEqual(len(hits), 3) 223 | self.assertEqual(hits[0]['_source'], {'data': 'test_0'}) 224 | 225 | def test_msearch(self): 226 | for i in range(0, 10): 227 | self.es.index(index='index_for_search1', doc_type=DOC_TYPE, body={ 228 | 'data': 'test_{0}'.format(i) if i % 2 == 0 else None, 229 | 'data2': 'test_{0}'.format(i) if (i+1) % 2 == 0 else None 230 | }) 231 | for i in range(0, 10): 232 | self.es.index(index='index_for_search2', doc_type=DOC_TYPE, body={ 233 | 'data': 'test_{0}'.format(i) if i % 2 == 0 else None, 234 | 'data2': 'test_{0}'.format(i) if (i+1) % 2 == 0 else None 235 | }) 236 | 237 | search_body = { 238 | "query": { 239 | "bool": { 240 | "must": { 241 | "multi_match": { 242 | "query": "test", 243 | 
"fields": ["data", "data2"] 244 | } 245 | } 246 | } 247 | } 248 | } 249 | body = [] 250 | body.append({'index': 'index_for_search1'}) 251 | body.append(search_body) 252 | body.append({'index': 'index_for_search2'}) 253 | body.append(search_body) 254 | 255 | result = self.es.msearch(index='index_for_search', body=body) 256 | response1, response2 = result['responses'] 257 | self.assertEqual(response1['hits']['total']['value'], 10) 258 | hits1 = response1['hits']['hits'] 259 | self.assertEqual(len(hits1), 10) 260 | self.assertEqual(response2['hits']['total']['value'], 10) 261 | hits2 = response2['hits']['hits'] 262 | self.assertEqual(len(hits2), 10) 263 | 264 | @parameterized.expand( 265 | [ 266 | ( 267 | 'timestamp gt', 268 | {'timestamp': {'gt': datetime.datetime(2009, 1, 1, 10, 20, 0).isoformat()}}, 269 | range(5, 12), 270 | ), 271 | ( 272 | 'timestamp gte', 273 | {'timestamp': {'gte': datetime.datetime(2009, 1, 1, 10, 20, 0).isoformat()}}, 274 | range(4, 12), 275 | ), 276 | ( 277 | 'timestamp lt', 278 | {'timestamp': {'lt': datetime.datetime(2009, 1, 1, 10, 35, 0).isoformat()}}, 279 | range(7), 280 | ), 281 | ( 282 | 'timestamp lte', 283 | {'timestamp': {'lte': datetime.datetime(2009, 1, 1, 10, 35, 0).isoformat()}}, 284 | range(8), 285 | ), 286 | ( 287 | 'timestamp combination', 288 | { 289 | 'timestamp': { 290 | 'gt': datetime.datetime(2009, 1, 1, 10, 15, 0).isoformat(), 291 | 'lte': datetime.datetime(2009, 1, 1, 10, 35, 0).isoformat(), 292 | } 293 | }, 294 | range(4, 8), 295 | ), 296 | ( 297 | 'data_int gt', 298 | {'data_int': {'gt': 40}}, 299 | range(5, 12), 300 | ), 301 | ( 302 | 'data_int gte', 303 | {'data_int': {'gte': 40}}, 304 | range(4, 12), 305 | ), 306 | ( 307 | 'data_int lt', 308 | {'data_int': {'lt': 70}}, 309 | range(7), 310 | ), 311 | ( 312 | 'data_int lte', 313 | {'data_int': {'lte': 70}}, 314 | range(8), 315 | ), 316 | ( 317 | 'data_int combination', 318 | {'data_int': {'gt': 30, 'lte': 70}}, 319 | range(4, 8), 320 | ), 321 | ] 322 | ) 323 | def test_search_with_range_query(self, _, query_range, expected_ids): 324 | for i in range(0, 12): 325 | body = { 326 | 'id': i, 327 | 'timestamp': datetime.datetime(2009, 1, 1, 10, 5 * i, 0), 328 | 'data_int': 10 * i, 329 | } 330 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body=body) 331 | 332 | response = self.es.search( 333 | index='index_for_search', 334 | doc_type=DOC_TYPE, 335 | body={'query': {'range': query_range}}, 336 | ) 337 | 338 | self.assertEqual(len(expected_ids), response['hits']['total']['value']) 339 | hits = response['hits']['hits'] 340 | self.assertEqual(set(expected_ids), set(hit['_source']['id'] for hit in hits)) 341 | 342 | def test_bucket_aggregation(self): 343 | data = [ 344 | {"data_x": 1, "data_y": "a"}, 345 | {"data_x": 1, "data_y": "a"}, 346 | {"data_x": 2, "data_y": "a"}, 347 | {"data_x": 2, "data_y": "b"}, 348 | {"data_x": 3, "data_y": "b"}, 349 | ] 350 | for body in data: 351 | self.es.index(index='index_for_search', doc_type=DOC_TYPE, body=body) 352 | 353 | response = self.es.search( 354 | index="index_for_search", 355 | doc_type=DOC_TYPE, 356 | body={ 357 | "query": {"match_all": {}}, 358 | "aggs": { 359 | "stats": { 360 | "composite": { 361 | "sources": [{"data_x": {"terms": {"field": "data_x"}}}], 362 | "size": 10000, 363 | }, 364 | "aggs": { 365 | "distinct_data_y": {"cardinality": {"field": "data_y"}} 366 | }, 367 | } 368 | }, 369 | }, 370 | ) 371 | 372 | expected = [ 373 | {"key": {"data_x": 1}, "doc_count": 2}, 374 | {"key": {"data_x": 2}, "doc_count": 2}, 375 | {"key": 
{"data_x": 3}, "doc_count": 1}, 376 | ] 377 | actual = response["aggregations"]["stats"]["buckets"] 378 | 379 | for x, y in zip(expected, actual): 380 | self.assertDictEqual(x["key"], y["key"]) 381 | self.assertEqual(x["doc_count"], y["doc_count"]) 382 | -------------------------------------------------------------------------------- /tests/fake_elasticsearch/test_suggest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from elasticsearch.exceptions import NotFoundError 4 | 5 | from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY 6 | 7 | 8 | class TestSuggest(TestElasticmock): 9 | 10 | def test_should_raise_notfounderror_when_nonindexed_id_is_used_for_suggest(self): 11 | with self.assertRaises(NotFoundError): 12 | self.es.suggest(body={}, index=INDEX_NAME) 13 | 14 | def test_should_return_suggestions(self): 15 | self.es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=BODY) 16 | suggestion_body = { 17 | 'suggestion-string': { 18 | 'text': 'test_text', 19 | 'term': { 20 | 'field': 'string' 21 | } 22 | }, 23 | 'suggestion-id': { 24 | 'text': 1234567, 25 | 'term': { 26 | 'field': 'id' 27 | } 28 | } 29 | } 30 | suggestion = self.es.suggest(body=suggestion_body, index=INDEX_NAME) 31 | self.assertIsNotNone(suggestion) 32 | self.assertDictEqual({ 33 | 'suggestion-string': [ 34 | { 35 | 'text': 'test_text', 36 | 'length': 1, 37 | 'options': [ 38 | { 39 | 'text': 'test_text_suggestion', 40 | 'freq': 1, 41 | 'score': 1.0 42 | } 43 | ], 44 | 'offset': 0 45 | } 46 | ], 47 | 'suggestion-id': [ 48 | { 49 | 'text': 1234567, 50 | 'length': 1, 51 | 'options': [ 52 | { 53 | 'text': 1234568, 54 | 'freq': 1, 55 | 'score': 1.0 56 | } 57 | ], 58 | 'offset': 0 59 | } 60 | ], 61 | }, suggestion) 62 | -------------------------------------------------------------------------------- /tests/fake_indices/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /tests/fake_indices/test_create.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME 4 | 5 | 6 | class TestCreate(TestElasticmock): 7 | 8 | def test_should_create_index(self): 9 | self.assertFalse(self.es.indices.exists(INDEX_NAME)) 10 | self.es.indices.create(INDEX_NAME) 11 | self.assertTrue(self.es.indices.exists(INDEX_NAME)) 12 | -------------------------------------------------------------------------------- /tests/fake_indices/test_delete.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME 4 | 5 | 6 | class TestDelete(TestElasticmock): 7 | 8 | def test_should_delete_index(self): 9 | self.assertFalse(self.es.indices.exists(INDEX_NAME)) 10 | 11 | self.es.indices.create(INDEX_NAME) 12 | self.assertTrue(self.es.indices.exists(INDEX_NAME)) 13 | 14 | self.es.indices.delete(INDEX_NAME) 15 | self.assertFalse(self.es.indices.exists(INDEX_NAME)) 16 | 17 | def test_should_delete_inexistent_index(self): 18 | self.assertFalse(self.es.indices.exists(INDEX_NAME)) 19 | 20 | self.es.indices.delete(INDEX_NAME) 21 | self.assertFalse(self.es.indices.exists(INDEX_NAME)) 22 | -------------------------------------------------------------------------------- /tests/fake_indices/test_exists.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME 4 | 5 | 6 | class TestExists(TestElasticmock): 7 | 8 | def test_should_return_false_when_index_does_not_exist(self): 9 | self.assertFalse(self.es.indices.exists(INDEX_NAME)) 10 | 11 | def test_should_return_true_when_index_exists(self): 12 | self.es.indices.create(INDEX_NAME) 13 | self.assertTrue(self.es.indices.exists(INDEX_NAME)) 14 | -------------------------------------------------------------------------------- /tests/fake_indices/test_refresh.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from tests import TestElasticmock, INDEX_NAME 4 | 5 | 6 | class TestRefresh(TestElasticmock): 7 | 8 | def test_should_refresh_index(self): 9 | self.es.indices.create(INDEX_NAME) 10 | self.es.indices.refresh(INDEX_NAME) 11 | self.assertTrue(self.es.indices.exists(INDEX_NAME)) 12 | -------------------------------------------------------------------------------- /tests/tox_banner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from __future__ import print_function 4 | 5 | import platform 6 | 7 | import elasticsearch 8 | 9 | print( 10 | "{} {}; Elasticsearch {}".format( 11 | platform.python_implementation(), 12 | platform.python_version(), 13 | elasticsearch.VERSION 14 | ) 15 | ) 16 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # content of: tox.ini, put in the same dir as setup.py 2 | [tox] 3 | envlist = 4 | py36-elasticsearch{1,2,5,6,7} 5 | py37-elasticsearch{1,2,5,6,7} 6 | py38-elasticsearch{1,2,5,6,7} 7 | py39-elasticsearch{1,2,5,6,7} 8 | 9 | [testenv] 10 | deps = 11 | parameterized 12 | pytest==4.6.9 13 | pytest-cov==2.8.1 14 | elasticsearch1: elasticsearch ==1.9.0 15 | elasticsearch2: elasticsearch >=2.0.0, <5.0.0 16 | elasticsearch5: elasticsearch >=5.0.0, <6.0.0 17 | elasticsearch6: elasticsearch >=6.0.0, <7.0.0 18 | elasticsearch7: elasticsearch >=7.0.0, <8.0.0 19 | commands = 20 | python -c "import tests.tox_banner" 21 | py.test --cov-report term-missing --cov=elasticmock 22 | --------------------------------------------------------------------------------
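Note: as a closing illustration of how the pieces above fit together, decorating a test with @elasticmock patches elasticsearch.Elasticsearch so that instantiations inside the decorated function return a FakeElasticsearch, as exercised throughout the test suite. A minimal end-to-end sketch, assuming the hits layout asserted in test_search.py:

import elasticsearch

from elasticmock import elasticmock


@elasticmock
def test_index_and_search():
    # No real server is contacted; the decorator swaps in FakeElasticsearch.
    es = elasticsearch.Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}])
    es.index(index='test_index', doc_type='doc-Type', body={'author': 'kimchy'})
    result = es.search(index='test_index')
    assert result['hits']['total']['value'] == 1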