├── .circleci ├── circle_requirements.txt └── config.yml ├── .github ├── release-drafter-config.yml └── workflows │ ├── check-pypi.yml │ ├── publish-pypi.yml │ └── release-drafter.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── pyproject.toml ├── redistimeseries ├── __init__.py ├── _version.py └── client.py ├── test_commands.py └── tox.ini /.circleci/circle_requirements.txt: -------------------------------------------------------------------------------- 1 | poetry>=1.1.0 2 | tox>=3.23.1 3 | tox-poetry>=0.4.0 4 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | 2 | # Python CircleCI 2.0 configuration file 3 | # 4 | # Check https://circleci.com/docs/2.0/language-python/ for more details 5 | # 6 | version: 2.1 7 | 8 | commands: 9 | 10 | abort_for_docs: 11 | steps: 12 | - run: 13 | name: Avoid tests for docs 14 | command: | 15 | if [[ $CIRCLE_BRANCH == *docs ]]; then 16 | echo "Identifies as documents PR, no testing required" 17 | circleci step halt 18 | fi 19 | 20 | abort_for_noci: 21 | steps: 22 | - run: 23 | name: Ignore CI for specific branches 24 | command: | 25 | if [[ $CIRCLE_BRANCH == *noci ]]; then 26 | echo "Identifies as actively ignoring CI, no testing required." 27 | circleci step halt 28 | fi 29 | 30 | 31 | early_return_for_forked_pull_requests: 32 | description: >- 33 | If this build is from a fork, stop executing the current job and return success. 34 | This is useful to avoid steps that will fail due to missing credentials. 
35 | steps: 36 | - run: 37 | name: Early return if this build is from a forked PR 38 | command: | 39 | if [[ -n "$CIRCLE_PR_NUMBER" ]]; then 40 | echo "Nothing to do for forked PRs, so marking this step successful" 41 | circleci step halt 42 | fi 43 | 44 | build_and_test: 45 | steps: 46 | - abort_for_docs 47 | - abort_for_noci 48 | - checkout 49 | 50 | - run: 51 | name: install tox dependencies 52 | command: | 53 | pip install --user --quiet --upgrade pip virtualenv==20.7.2 54 | pip install --user --quiet -r .circleci/circle_requirements.txt 55 | 56 | - run: 57 | name: build sdist and wheels 58 | command: | 59 | # https://github.com/python-poetry/poetry/issues/4210 60 | poetry config experimental.new-installer false 61 | poetry build 62 | 63 | - run: 64 | name: lint 65 | command: | 66 | tox -e linters 67 | 68 | - run: 69 | name: run tests 70 | command: 71 | tox -e cover 72 | 73 | - store_artifacts: 74 | path: test-reports 75 | destination: test-reports 76 | 77 | 78 | jobs: 79 | build: 80 | parameters: 81 | python_version: 82 | type: string 83 | default: "latest" 84 | docker: 85 | - image: circleci/python:<<parameters.python_version>> 86 | - image: redislabs/redistimeseries:edge 87 | steps: 88 | - build_and_test 89 | 90 | on-any-branch: &on-any-branch 91 | filters: 92 | branches: 93 | only: 94 | - /.*/ 95 | tags: 96 | ignore: /.*/ 97 | 98 | on-master: &on-master 99 | filters: 100 | branches: 101 | only: 102 | - master 103 | 104 | # this is to build and test, per commit, against all supported python versions 105 | python-versions: &python-versions 106 | matrix: 107 | parameters: 108 | python_version: 109 | - "3.6.9" 110 | - "3.7.9" 111 | - "3.8.9" 112 | - "3.9.4" 113 | - "3.10.0" 114 | - "latest" 115 | 116 | workflows: 117 | commit: 118 | jobs: 119 | - build: 120 | <<: *on-any-branch 121 | <<: *python-versions 122 | 123 | nightly: 124 | triggers: 125 | - schedule: 126 | cron: "0 0 * * *" 127 | <<: *on-master 128 | jobs: 129 | - build 130 | 
-------------------------------------------------------------------------------- /.github/release-drafter-config.yml: -------------------------------------------------------------------------------- 1 | name-template: 'Version $NEXT_PATCH_VERSION' 2 | tag-template: 'v$NEXT_PATCH_VERSION' 3 | categories: 4 | - title: 'Features' 5 | labels: 6 | - 'feature' 7 | - 'enhancement' 8 | - title: 'Bug Fixes' 9 | labels: 10 | - 'fix' 11 | - 'bugfix' 12 | - 'bug' 13 | - title: 'Maintenance' 14 | label: 'chore' 15 | change-template: '- $TITLE (#$NUMBER)' 16 | exclude-labels: 17 | - 'skip-changelog' 18 | template: | 19 | ## Changes 20 | 21 | $CHANGES 22 | -------------------------------------------------------------------------------- /.github/workflows/check-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Check if required secrets are set to publish to Pypi 2 | 3 | on: push 4 | 5 | jobs: 6 | checksecret: 7 | name: check if PYPI_TOKEN and TESTPYPI_TOKEN are set in github secrets 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Check PYPI_TOKEN 11 | env: 12 | PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} 13 | run: | 14 | if ${{ env.PYPI_TOKEN == '' }} ; then 15 | echo "PYPI_TOKEN secret is not set" 16 | exit 1 17 | fi 18 | - name: Check TESTPYPI_TOKEN 19 | env: 20 | TESTPYPI_TOKEN: ${{ secrets.TESTPYPI_TOKEN }} 21 | run: | 22 | if ${{ env.TESTPYPI_TOKEN == '' }} ; then 23 | echo "TESTPYPI_TOKEN secret is not set" 24 | exit 1 25 | fi 26 | -------------------------------------------------------------------------------- /.github/workflows/publish-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Pypi 2 | on: 3 | release: 4 | types: [ published ] 5 | 6 | jobs: 7 | pytest: 8 | name: Publish to PyPi 9 | runs-on: ubuntu-latest 10 | env: 11 | ACTIONS_ALLOW_UNSECURE_COMMANDS: true 12 | steps: 13 | - uses: actions/checkout@master 14 | 15 | - name: get version from tag 16 | id: 
get_version 17 | run: | 18 | realversion="${GITHUB_REF/refs\/tags\//}" 19 | realversion="${realversion//v/}" 20 | echo "::set-output name=VERSION::$realversion" 21 | 22 | - name: Set the version for publishing 23 | uses: ciiiii/toml-editor@1.0.0 24 | with: 25 | file: "pyproject.toml" 26 | key: "tool.poetry.version" 27 | value: "${{ steps.get_version.outputs.VERSION }}" 28 | 29 | - name: Set up Python 3.7 30 | uses: actions/setup-python@v1 31 | with: 32 | python-version: 3.7 33 | 34 | - name: Install Poetry 35 | uses: dschep/install-poetry-action@v1.3 36 | 37 | - name: Cache Poetry virtualenv 38 | uses: actions/cache@v1 39 | id: cache 40 | with: 41 | path: ~/.virtualenvs 42 | key: poetry-${{ hashFiles('**/poetry.lock') }} 43 | restore-keys: | 44 | poetry-${{ hashFiles('**/poetry.lock') }} 45 | 46 | - name: Set Poetry config 47 | run: | 48 | poetry config virtualenvs.in-project false 49 | poetry config virtualenvs.path ~/.virtualenvs 50 | 51 | - name: Install Dependencies 52 | run: poetry install 53 | if: steps.cache.outputs.cache-hit != 'true' 54 | 55 | - name: Publish to PyPI 56 | if: github.event_name == 'release' 57 | run: | 58 | poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }} --build 59 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | # branches to consider in the event; optional, defaults to all 6 | branches: 7 | - master 8 | 9 | jobs: 10 | update_release_draft: 11 | runs-on: ubuntu-latest 12 | steps: 13 | # Drafts your next Release notes as Pull Requests are merged into "master" 14 | - uses: release-drafter/release-drafter@v5 15 | with: 16 | # (Optional) specify config name to use, relative to .github/. 
Default: release-drafter.yml 17 | config-name: release-drafter-config.yml 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | .idea/ 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | .hypothesis/ 49 | .pytest_cache/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | db.sqlite3 59 | 60 | # Flask stuff: 61 | instance/ 62 | .webassets-cache 63 | 64 | # Scrapy stuff: 65 | .scrapy 66 | 67 | # Sphinx documentation 68 | docs/_build/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | # Jupyter Notebook 74 | .ipynb_checkpoints 75 | 76 | # pyenv 77 | .python-version 78 | 79 | # celery beat schedule file 80 | celerybeat-schedule 81 | 82 | # SageMath parsed files 83 | *.sage.py 84 | 85 | # Environments 86 | .env 87 | .venv 88 | env/ 89 | venv/ 90 | ENV/ 91 | env.bak/ 92 | venv.bak/ 93 | 94 | # Spyder project settings 95 | .spyderproject 96 | .spyproject 97 | 98 | # Rope project settings 99 | .ropeproject 100 | 101 | # mkdocs documentation 102 | /site 103 | 104 | # 
mypy 105 | .mypy_cache/ 106 | 107 | # eclipe 108 | .project 109 | .pydevproject 110 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2019, RedisTimeSeries 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt 2 | include LICENSE 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![license](https://img.shields.io/github/license/RedisTimeSeries/redistimeseries-py.svg)](https://github.com/RedisTimeSeries/redistimeseries-py) 2 | [![PyPI version](https://badge.fury.io/py/redistimeseries.svg)](https://badge.fury.io/py/redistimeseries) 3 | [![CircleCI](https://circleci.com/gh/RedisTimeSeries/redistimeseries-py/tree/master.svg?style=svg)](https://circleci.com/gh/RedisTimeSeries/redistimeseries-py/tree/master) 4 | [![GitHub issues](https://img.shields.io/github/release/RedisTimeSeries/redistimeseries-py.svg)](https://github.com/RedisTimeSeries/redistimeseries-py/releases/latest) 5 | [![Codecov](https://codecov.io/gh/RedisTimeSeries/redistimeseries-py/branch/master/graph/badge.svg)](https://codecov.io/gh/RedisTimeSeries/redistimeseries-py) 6 | [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/RedisTimeSeries/redistimeseries-py.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/RedisTimeSeries/redistimeseries-py/context:python) 7 | [![Known Vulnerabilities](https://snyk.io/test/github/RedisTimeSeries/redistimeseries-py/badge.svg?targetFile=pyproject.toml)](https://snyk.io/test/github/RedisTimeSeries/redistimeseries-py?targetFile=pyproject.toml) 8 | 9 | # redistimeseries-py 10 | [![Forum](https://img.shields.io/badge/Forum-RedisTimeSeries-blue)](https://forum.redislabs.com/c/modules/redistimeseries) 11 | [![Discord](https://img.shields.io/discord/697882427875393627?style=flat-square)](https://discord.gg/KExRgMb) 12 | 13 | ## Deprecation notice 14 | 15 | As of [redis-py 4.0.0](https://pypi.org/project/redis/) this library 
is deprecated. Its features have been merged into redis-py. Please either install it [from PyPI](https://pypi.org/project/redis) or [the repo](https://github.com/redis/redis-py). 16 | 17 | -------------------------------- 18 | 19 | redistimeseries-py is a package that gives developers easy access to the RedisTimeSeries module. The package extends [redis-py](https://github.com/andymccurdy/redis-py)'s interface with RedisTimeSeries's API. 20 | 21 | ## Installation 22 | ``` 23 | $ pip install redistimeseries 24 | ``` 25 | 26 | ## Development 27 | 28 | 1. Create a virtualenv to manage your python dependencies, and ensure it's active. 29 | ```virtualenv -v venv``` 30 | 2. Install [pypoetry](https://python-poetry.org/) to manage your dependencies. 31 | ```pip install poetry``` 32 | 3. Install dependencies. 33 | ```poetry install``` 34 | 35 | [tox](https://tox.readthedocs.io/en/latest/) runs all tests as its default target. Running *tox* by itself will run unit tests. Ensure you have a running redis, with the module loaded. 36 | 37 | 38 | 39 | ## API 40 | The complete documentation of RedisTimeSeries's commands can be found at [RedisTimeSeries's website](http://redistimeseries.io/). 
41 | 42 | ## Usage example 43 | 44 | ```python 45 | # Simple example 46 | from redistimeseries.client import Client 47 | rts = Client() 48 | rts.create('test', labels={'Time':'Series'}) 49 | rts.add('test', 1, 1.12) 50 | rts.add('test', 2, 1.12) 51 | rts.get('test') 52 | rts.incrby('test',1) 53 | rts.range('test', 0, -1) 54 | rts.range('test', 0, -1, aggregation_type='avg', bucket_size_msec=10) 55 | rts.range('test', 0, -1, aggregation_type='sum', bucket_size_msec=10) 56 | rts.info('test').__dict__ 57 | 58 | # Example with rules 59 | rts.create('source', retention_msecs=40) 60 | rts.create('sumRule') 61 | rts.create('avgRule') 62 | rts.createrule('source', 'sumRule', 'sum', 20) 63 | rts.createrule('source', 'avgRule', 'avg', 15) 64 | rts.add('source', '*', 1) 65 | rts.add('source', '*', 2) 66 | rts.add('source', '*', 3) 67 | rts.get('sumRule') 68 | rts.get('avgRule') 69 | rts.info('sumRule').__dict__ 70 | ``` 71 | 72 | ## Further notes on back-filling time series 73 | 74 | Since [RedisTimeSeries 1.4](https://github.com/RedisTimeSeries/RedisTimeSeries/releases/tag/v1.4.5) we've added the ability to back-fill time series, with different duplicate policies. 75 | 76 | The default behavior is to block updates to the same timestamp, and you can control it via the `duplicate_policy` argument. You can check in detail the [duplicate policy documentation](https://oss.redislabs.com/redistimeseries/configuration/#duplicate_policy). 
77 | 78 | Below you can find an example of the `LAST` duplicate policy, in which we override duplicate timestamps with the latest value: 79 | 80 | ```python 81 | from redistimeseries.client import Client 82 | rts = Client() 83 | rts.create('last-upsert', labels={'Time':'Series'}, duplicate_policy='last') 84 | rts.add('last-upsert', 1, 10.0) 85 | rts.add('last-upsert', 1, 5.0) 86 | # should output [(1, 5.0)] 87 | print(rts.range('last-upsert', 0, -1)) 88 | ``` 89 | 90 | ## License 91 | [BSD 3-Clause](https://github.com/ashtul/redistimeseries-py/blob/master/LICENSE) 92 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "redistimeseries" 3 | version = "1.4.4" 4 | description = "RedisTimeSeries Python Client" 5 | authors = ["RedisLabs "] 6 | license = "BSD-3-Clause" 7 | readme = "README.md" 8 | 9 | classifiers = [ 10 | 'Topic :: Database', 11 | 'Programming Language :: Python', 12 | 'Intended Audience :: Developers', 13 | 'Programming Language :: Python :: 3.6', 14 | 'Programming Language :: Python :: 3.7', 15 | 'Programming Language :: Python :: 3.8', 16 | 'Programming Language :: Python :: 3.9', 17 | 'Programming Language :: Python :: 3.10', 18 | 'License :: OSI Approved :: BSD License', 19 | 'Operating System :: OS Independent', 20 | 'Development Status :: 5 - Production/Stable' 21 | ] 22 | keywords = ["Redis TimeSeries Extension"] 23 | 24 | [tool.poetry.dependencies] 25 | python = "^3.6" 26 | redis = "3.5.3" 27 | importlib-metadata = "^4.0.1" 28 | 29 | 30 | [tool.poetry.urls] 31 | url = "https://redistimeseries.io" 32 | repository = "https://github.com/RedisTimeSeries/redistimeseries-py" 33 | 34 | [tool.poetry.dev-dependencies] 35 | codecov = "^2.1.11" 36 | flake8 = "^3.9.2" 37 | tox = "^3.23.1" 38 | tox-poetry = "^0.3.0" 39 | bandit = "^1.7.0" 40 | vulture = "^2.3" 41 | 42 | [build-system] 43 | requires = 
["poetry-core>=1.0.0"] 44 | build-backend = "poetry.core.masonry.api" 45 | -------------------------------------------------------------------------------- /redistimeseries/__init__.py: -------------------------------------------------------------------------------- 1 | from ._version import __version__ 2 | -------------------------------------------------------------------------------- /redistimeseries/_version.py: -------------------------------------------------------------------------------- 1 | # This attribute is the only one place that the version number is written down, 2 | # so there is only one place to change it when the version number changes. 3 | # Edit the pyproject.toml to modify versions 4 | def get_version(): 5 | try: 6 | from importlib.metadata import version 7 | except ModuleNotFoundError: # python 3.6, 3.7 8 | from importlib_metadata import version 9 | 10 | try: 11 | md = importlib.metadata.metadata('redistimeseries') 12 | return version('redistimeseries') 13 | except: 14 | return 'dev' 15 | 16 | 17 | __version__ = get_version() 18 | -------------------------------------------------------------------------------- /redistimeseries/client.py: -------------------------------------------------------------------------------- 1 | from redis import Redis, DataError 2 | from redis.client import Pipeline 3 | from redis.client import bool_ok 4 | from redis._compat import nativestr 5 | 6 | 7 | class TSInfo(object): 8 | rules = [] 9 | labels = [] 10 | sourceKey = None 11 | chunk_count = None 12 | memory_usage = None 13 | total_samples = None 14 | retention_msecs = None 15 | last_time_stamp = None 16 | first_time_stamp = None 17 | # As of RedisTimeseries >= v1.4 max_samples_per_chunk is deprecated in favor of chunk_size 18 | max_samples_per_chunk = None 19 | chunk_size = None 20 | duplicate_policy = None 21 | 22 | def __init__(self, args): 23 | response = dict(zip(map(nativestr, args[::2]), args[1::2])) 24 | self.rules = response['rules'] 25 | self.sourceKey 
= response['sourceKey'] 26 | self.chunkCount = response['chunkCount'] 27 | self.memory_usage = response['memoryUsage'] 28 | self.total_samples = response['totalSamples'] 29 | self.labels = list_to_dict(response['labels']) 30 | self.retention_msecs = response['retentionTime'] 31 | self.lastTimeStamp = response['lastTimestamp'] 32 | self.first_time_stamp = response['firstTimestamp'] 33 | if 'maxSamplesPerChunk' in response: 34 | self.max_samples_per_chunk = response['maxSamplesPerChunk'] 35 | self.chunk_size = self.max_samples_per_chunk * 16 # backward compatible changes 36 | if 'chunkSize' in response: 37 | self.chunk_size = response['chunkSize'] 38 | if 'duplicatePolicy' in response: 39 | self.duplicate_policy = response['duplicatePolicy'] 40 | if type(self.duplicate_policy) == bytes: 41 | self.duplicate_policy = self.duplicate_policy.decode() 42 | 43 | 44 | def list_to_dict(aList): 45 | return {nativestr(aList[i][0]): nativestr(aList[i][1]) 46 | for i in range(len(aList))} 47 | 48 | 49 | def parse_range(response): 50 | return [tuple((l[0], float(l[1]))) for l in response] 51 | 52 | 53 | def parse_m_range(response): 54 | res = [] 55 | for item in response: 56 | res.append({nativestr(item[0]): [list_to_dict(item[1]), 57 | parse_range(item[2])]}) 58 | return res 59 | 60 | 61 | def parse_get(response): 62 | if not response: 63 | return None 64 | return int(response[0]), float(response[1]) 65 | 66 | 67 | def parse_m_get(response): 68 | res = [] 69 | for item in response: 70 | if item[2] == []: 71 | res.append({nativestr(item[0]): [list_to_dict(item[1]), None, None]}) 72 | else: 73 | res.append({nativestr(item[0]): [list_to_dict(item[1]), 74 | int(item[2][0]), float(item[2][1])]}) 75 | 76 | return res 77 | 78 | 79 | def parseToList(response): 80 | res = [] 81 | for item in response: 82 | res.append(nativestr(item)) 83 | return res 84 | 85 | 86 | class Client(object): # changed from StrictRedis 87 | """ 88 | This class subclasses redis-py's `Redis` and implements 89 | 
RedisTimeSeries's commands (prefixed with "ts"). 90 | The client allows to interact with RedisTimeSeries and use all of 91 | it's functionality. 92 | """ 93 | 94 | CREATE_CMD = 'TS.CREATE' 95 | ALTER_CMD = 'TS.ALTER' 96 | ADD_CMD = 'TS.ADD' 97 | MADD_CMD = 'TS.MADD' 98 | INCRBY_CMD = 'TS.INCRBY' 99 | DECRBY_CMD = 'TS.DECRBY' 100 | DEL_CMD = 'TS.DEL' 101 | CREATERULE_CMD = 'TS.CREATERULE' 102 | DELETERULE_CMD = 'TS.DELETERULE' 103 | RANGE_CMD = 'TS.RANGE' 104 | REVRANGE_CMD = 'TS.REVRANGE' 105 | MRANGE_CMD = 'TS.MRANGE' 106 | MREVRANGE_CMD = 'TS.MREVRANGE' 107 | GET_CMD = 'TS.GET' 108 | MGET_CMD = 'TS.MGET' 109 | INFO_CMD = 'TS.INFO' 110 | QUERYINDEX_CMD = 'TS.QUERYINDEX' 111 | 112 | def __init__(self, conn=None, *args, **kwargs): 113 | """ 114 | Creates a new RedisTimeSeries client. 115 | """ 116 | self.redis = conn if conn is not None else Redis(*args, **kwargs) 117 | 118 | # Set the module commands' callbacks 119 | MODULE_CALLBACKS = { 120 | self.CREATE_CMD: bool_ok, 121 | self.ALTER_CMD: bool_ok, 122 | self.CREATERULE_CMD: bool_ok, 123 | self.DELETERULE_CMD: bool_ok, 124 | self.RANGE_CMD: parse_range, 125 | self.REVRANGE_CMD: parse_range, 126 | self.MRANGE_CMD: parse_m_range, 127 | self.MREVRANGE_CMD: parse_m_range, 128 | self.GET_CMD: parse_get, 129 | self.MGET_CMD: parse_m_get, 130 | self.INFO_CMD: TSInfo, 131 | self.QUERYINDEX_CMD: parseToList, 132 | } 133 | for k in MODULE_CALLBACKS: 134 | self.redis.set_response_callback(k, MODULE_CALLBACKS[k]) 135 | 136 | @staticmethod 137 | def appendUncompressed(params, uncompressed): 138 | if uncompressed: 139 | params.extend(['UNCOMPRESSED']) 140 | 141 | @staticmethod 142 | def appendWithLabels(params, with_labels, select_labels=None): 143 | if with_labels and select_labels: 144 | raise DataError("with_labels and select_labels cannot be provided together.") 145 | 146 | if with_labels: 147 | params.extend(['WITHLABELS']) 148 | if select_labels: 149 | params.extend(['SELECTED_LABELS', *select_labels]) 150 | 151 | 
@staticmethod 152 | def appendGroupbyReduce(params, groupby, reduce): 153 | if groupby is not None and reduce is not None: 154 | params.extend(['GROUPBY', groupby, 'REDUCE', reduce.upper()]) 155 | 156 | @staticmethod 157 | def appendRetention(params, retention): 158 | if retention is not None: 159 | params.extend(['RETENTION', retention]) 160 | 161 | @staticmethod 162 | def appendLabels(params, labels): 163 | if labels: 164 | params.append('LABELS') 165 | for k, v in labels.items(): 166 | params.extend([k, v]) 167 | 168 | @staticmethod 169 | def appendCount(params, count): 170 | if count is not None: 171 | params.extend(['COUNT', count]) 172 | 173 | @staticmethod 174 | def appendTimestamp(params, timestamp): 175 | if timestamp is not None: 176 | params.extend(['TIMESTAMP', timestamp]) 177 | 178 | @staticmethod 179 | def appendAlign(params, align): 180 | if align is not None: 181 | params.extend(['ALIGN', align]) 182 | 183 | @staticmethod 184 | def appendAggregation(params, aggregation_type, 185 | bucket_size_msec): 186 | params.append('AGGREGATION') 187 | params.extend([aggregation_type, bucket_size_msec]) 188 | 189 | @staticmethod 190 | def appendChunkSize(params, chunk_size): 191 | if chunk_size is not None: 192 | params.extend(['CHUNK_SIZE', chunk_size]) 193 | 194 | @staticmethod 195 | def appendDuplicatePolicy(params, command, duplicate_policy): 196 | if duplicate_policy is not None: 197 | if command == 'TS.ADD': 198 | params.extend(['ON_DUPLICATE', duplicate_policy]) 199 | else: 200 | params.extend(['DUPLICATE_POLICY', duplicate_policy]) 201 | 202 | @staticmethod 203 | def appendFilerByTs(params, ts_list): 204 | if ts_list is not None: 205 | params.extend(["FILTER_BY_TS", *ts_list]) 206 | 207 | @staticmethod 208 | def appendFilerByValue(params, min_value, max_value): 209 | if min_value is not None and max_value is not None: 210 | params.extend(["FILTER_BY_VALUE", min_value, max_value]) 211 | 212 | def create(self, key, **kwargs): 213 | """ 214 | Create a new 
time-series. 215 | 216 | Args: 217 | key: time-series key 218 | retention_msecs: Maximum age for samples compared to last event time (in milliseconds). 219 | If None or 0 is passed then the series is not trimmed at all. 220 | uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default. 221 | Adding this flag will keep data in an uncompressed form. Compression not only saves 222 | memory but usually improve performance due to lower number of memory accesses 223 | labels: Set of label-value pairs that represent metadata labels of the key. 224 | chunk_size: Each time-serie uses chunks of memory of fixed size for time series samples. 225 | You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes). 226 | duplicate_policy: since RedisTimeSeries v1.4 you can specify the duplicate sample policy ( Configure what to do on duplicate sample. ) 227 | Can be one of: 228 | - 'block': an error will occur for any out of order sample 229 | - 'first': ignore the new value 230 | - 'last': override with latest value 231 | - 'min': only override if the value is lower than the existing value 232 | - 'max': only override if the value is higher than the existing value 233 | When this is not set, the server-wide default will be used. 
234 | """ 235 | retention_msecs = kwargs.get('retention_msecs', None) 236 | uncompressed = kwargs.get('uncompressed', False) 237 | labels = kwargs.get('labels', {}) 238 | chunk_size = kwargs.get('chunk_size', None) 239 | duplicate_policy = kwargs.get('duplicate_policy', None) 240 | params = [key] 241 | self.appendRetention(params, retention_msecs) 242 | self.appendUncompressed(params, uncompressed) 243 | self.appendChunkSize(params, chunk_size) 244 | self.appendDuplicatePolicy(params, self.CREATE_CMD, duplicate_policy) 245 | self.appendLabels(params, labels) 246 | 247 | return self.redis.execute_command(self.CREATE_CMD, *params) 248 | 249 | def alter(self, key, **kwargs): 250 | """ 251 | Update the retention, labels of an existing key. The parameters 252 | are the same as TS.CREATE. 253 | """ 254 | retention_msecs = kwargs.get('retention_msecs', None) 255 | labels = kwargs.get('labels', {}) 256 | duplicate_policy = kwargs.get('duplicate_policy', None) 257 | params = [key] 258 | self.appendRetention(params, retention_msecs) 259 | self.appendDuplicatePolicy(params, self.ALTER_CMD, duplicate_policy) 260 | self.appendLabels(params, labels) 261 | 262 | return self.redis.execute_command(self.ALTER_CMD, *params) 263 | 264 | def add(self, key, timestamp, value, **kwargs): 265 | """ 266 | Append (or create and append) a new sample to the series. 267 | 268 | Args: 269 | key: time-series key 270 | timestamp: timestamp of the sample. * can be used for automatic timestamp (using the system clock). 271 | value: numeric data value of the sample 272 | retention_msecs: Maximum age for samples compared to last event time (in milliseconds). 273 | If None or 0 is passed then the series is not trimmed at all. 274 | uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default. 275 | Adding this flag will keep data in an uncompressed form. 
(continuation of Client.add() -- the ``def add(...)`` line and the head of this
docstring precede this chunk)
        Compression not only saves
        memory but usually improves performance due to a lower number of memory accesses.
        labels: Set of label-value pairs that represent metadata labels of the key.
        chunk_size: Each time-series uses chunks of memory of fixed size for time series samples.
        You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
        duplicate_policy: since RedisTimeSeries v1.4 you can specify the duplicate sample policy
        (configure what to do on a duplicate sample).
        Can be one of:
              - 'block': an error will occur for any out of order sample
              - 'first': ignore the new value
              - 'last': override with latest value
              - 'min': only override if the value is lower than the existing value
              - 'max': only override if the value is higher than the existing value
        When this is not set, the server-wide default will be used.
        """
        retention_msecs = kwargs.get('retention_msecs', None)
        uncompressed = kwargs.get('uncompressed', False)
        labels = kwargs.get('labels', {})
        chunk_size = kwargs.get('chunk_size', None)
        duplicate_policy = kwargs.get('duplicate_policy', None)
        params = [key, timestamp, value]
        self.appendRetention(params, retention_msecs)
        self.appendUncompressed(params, uncompressed)
        self.appendChunkSize(params, chunk_size)
        self.appendDuplicatePolicy(params, self.ADD_CMD, duplicate_policy)
        self.appendLabels(params, labels)

        return self.redis.execute_command(self.ADD_CMD, *params)

    def madd(self, ktv_tuples):
        """
        Appends (or creates and appends) a new ``value`` to series
        ``key`` with ``timestamp``. Expects a list of ``tuples`` as
        (``key``, ``timestamp``, ``value``). Return value is an
        array with timestamps of insertions.
        """
        params = []
        for ktv in ktv_tuples:
            for item in ktv:
                params.append(item)

        return self.redis.execute_command(self.MADD_CMD, *params)

    def incrby(self, key, value, **kwargs):
        """
        Increment (or create a time-series and increment) the latest sample of a series.
        This command can be used as a counter or gauge that automatically gets history as a time series.

        Args:
            key: time-series key
            value: numeric data value of the sample
            timestamp: timestamp of the sample. None can be used for automatic timestamp (using the system clock).
            retention_msecs: Maximum age for samples compared to last event time (in milliseconds).
                If None or 0 is passed then the series is not trimmed at all.
            uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
                Adding this flag will keep data in an uncompressed form. Compression not only saves
                memory but usually improves performance due to a lower number of memory accesses.
            labels: Set of label-value pairs that represent metadata labels of the key.
            chunk_size: Each time-series uses chunks of memory of fixed size for time series samples.
                You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
        """
        timestamp = kwargs.get('timestamp', None)
        retention_msecs = kwargs.get('retention_msecs', None)
        uncompressed = kwargs.get('uncompressed', False)
        labels = kwargs.get('labels', {})
        chunk_size = kwargs.get('chunk_size', None)
        params = [key, value]
        self.appendTimestamp(params, timestamp)
        self.appendRetention(params, retention_msecs)
        self.appendUncompressed(params, uncompressed)
        self.appendChunkSize(params, chunk_size)
        self.appendLabels(params, labels)

        return self.redis.execute_command(self.INCRBY_CMD, *params)

    def decrby(self, key, value, **kwargs):
        """
        Decrement (or create a time-series and decrement) the latest sample of a series.
        This command can be used as a counter or gauge that automatically gets history as a time series.

        Args:
            key: time-series key
            value: numeric data value of the sample
            timestamp: timestamp of the sample. None can be used for automatic timestamp (using the system clock).
            retention_msecs: Maximum age for samples compared to last event time (in milliseconds).
                If None or 0 is passed then the series is not trimmed at all.
            uncompressed: since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
                Adding this flag will keep data in an uncompressed form. Compression not only saves
                memory but usually improves performance due to a lower number of memory accesses.
            labels: Set of label-value pairs that represent metadata labels of the key.
            chunk_size: Each time-series uses chunks of memory of fixed size for time series samples.
                You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
        """
        timestamp = kwargs.get('timestamp', None)
        retention_msecs = kwargs.get('retention_msecs', None)
        uncompressed = kwargs.get('uncompressed', False)
        labels = kwargs.get('labels', {})
        chunk_size = kwargs.get('chunk_size', None)
        params = [key, value]
        self.appendTimestamp(params, timestamp)
        self.appendRetention(params, retention_msecs)
        self.appendUncompressed(params, uncompressed)
        self.appendChunkSize(params, chunk_size)
        self.appendLabels(params, labels)

        return self.redis.execute_command(self.DECRBY_CMD, *params)

    def delrange(self, key, from_time, to_time):
        """
        Delete data points for a given timeseries and interval range in the form of start and end delete timestamps.
        The given timestamp interval is closed (inclusive), meaning start and end data points will also be deleted.
        Return the count for deleted items.

        Args:
            key: time-series key.
            from_time: Start timestamp for the range deletion.
            to_time: End timestamp for the range deletion.
        """
        return self.redis.execute_command(self.DEL_CMD, key, from_time, to_time)

    def createrule(self, source_key, dest_key,
                   aggregation_type, bucket_size_msec):
        """
        Creates a compaction rule from values added to ``source_key``
        into ``dest_key``. Aggregating for ``bucket_size_msec`` where an
        ``aggregation_type`` can be ['avg', 'sum', 'min', 'max',
        'range', 'count', 'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
        """
        params = [source_key, dest_key]
        self.appendAggregation(params, aggregation_type, bucket_size_msec)

        return self.redis.execute_command(self.CREATERULE_CMD, *params)

    def deleterule(self, source_key, dest_key):
        """Deletes a compaction rule."""
        return self.redis.execute_command(self.DELETERULE_CMD, source_key, dest_key)

    def __range_params(self, key, from_time, to_time, count, aggregation_type, bucket_size_msec,
                       filter_by_ts, filter_by_min_value, filter_by_max_value, align):
        """
        Internal method to create TS.RANGE and TS.REVRANGE arguments.
        """
        params = [key, from_time, to_time]
        self.appendFilerByTs(params, filter_by_ts)
        self.appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
        self.appendCount(params, count)
        self.appendAlign(params, align)
        if aggregation_type is not None:
            self.appendAggregation(params, aggregation_type, bucket_size_msec)

        return params

    def range(self, key, from_time, to_time, count=None, aggregation_type=None,
              bucket_size_msec=0, filter_by_ts=None, filter_by_min_value=None,
              filter_by_max_value=None, align=None):
        """
        Query a range in forward direction for a specific time-series.

        Args:
            key: Key name for timeseries.
            from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
            to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
            count: Optional maximum number of returned results.
            aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
                'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
            bucket_size_msec: Time bucket for aggregation in milliseconds.
            filter_by_ts: List of timestamps to filter the result by specific timestamps.
            filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
            filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
            align: Timestamp for alignment control for aggregation.
        """
        params = self.__range_params(key, from_time, to_time, count, aggregation_type, bucket_size_msec,
                                     filter_by_ts, filter_by_min_value, filter_by_max_value, align)
        return self.redis.execute_command(self.RANGE_CMD, *params)

    def revrange(self, key, from_time, to_time, count=None, aggregation_type=None,
                 bucket_size_msec=0, filter_by_ts=None, filter_by_min_value=None,
                 filter_by_max_value=None, align=None):
        """
        Query a range in reverse direction for a specific time-series.
        Note: This command is only available since RedisTimeSeries >= v1.4

        Args:
            key: Key name for timeseries.
            from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
            to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
            count: Optional maximum number of returned results.
            aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
                'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
            bucket_size_msec: Time bucket for aggregation in milliseconds.
            filter_by_ts: List of timestamps to filter the result by specific timestamps.
            filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
            filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
            align: Timestamp for alignment control for aggregation.
        """
        params = self.__range_params(key, from_time, to_time, count, aggregation_type, bucket_size_msec,
                                     filter_by_ts, filter_by_min_value, filter_by_max_value, align)
        return self.redis.execute_command(self.REVRANGE_CMD, *params)

    def __mrange_params(self, aggregation_type, bucket_size_msec, count, filters, from_time, to_time,
                        with_labels, filter_by_ts, filter_by_min_value, filter_by_max_value, groupby,
                        reduce, select_labels, align):
        """
        Internal method to create TS.MRANGE and TS.MREVRANGE arguments.
        """
        params = [from_time, to_time]
        self.appendFilerByTs(params, filter_by_ts)
        self.appendFilerByValue(params, filter_by_min_value, filter_by_max_value)
        self.appendCount(params, count)
        self.appendAlign(params, align)
        if aggregation_type is not None:
            self.appendAggregation(params, aggregation_type, bucket_size_msec)
        self.appendWithLabels(params, with_labels, select_labels)
        params.extend(['FILTER'])
        params += filters
        self.appendGroupbyReduce(params, groupby, reduce)
        return params

    def mrange(self, from_time, to_time, filters, count=None, aggregation_type=None, bucket_size_msec=0,
               with_labels=False, filter_by_ts=None, filter_by_min_value=None, filter_by_max_value=None,
               groupby=None, reduce=None, select_labels=None, align=None):
        """
        Query a range across multiple time-series by filters in forward direction.

        Args:
            from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
            to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
            filters: filter to match the time-series labels.
            count: Optional maximum number of returned results.
            aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
                'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
            bucket_size_msec: Time bucket for aggregation in milliseconds.
            with_labels: Include in the reply the label-value pairs that represent metadata labels of the time-series.
                If this argument is not set, by default, an empty Array will be replied on the labels array position.
            filter_by_ts: List of timestamps to filter the result by specific timestamps.
            filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
            filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
            groupby: Grouping by fields the results (must mention also reduce).
            reduce: Applying reducer functions on each group. Can be one of ['sum', 'min', 'max'].
            select_labels: Include in the reply only a subset of the key-value pair labels of a series.
            align: Timestamp for alignment control for aggregation.
        """
        params = self.__mrange_params(aggregation_type, bucket_size_msec, count, filters, from_time, to_time,
                                      with_labels, filter_by_ts, filter_by_min_value, filter_by_max_value,
                                      groupby, reduce, select_labels, align)

        return self.redis.execute_command(self.MRANGE_CMD, *params)

    def mrevrange(self, from_time, to_time, filters, count=None, aggregation_type=None, bucket_size_msec=0,
                  with_labels=False, filter_by_ts=None, filter_by_min_value=None, filter_by_max_value=None,
                  groupby=None, reduce=None, select_labels=None, align=None):
        """
        Query a range across multiple time-series by filters in reverse direction.

        Args:
            from_time: Start timestamp for the range query. - can be used to express the minimum possible timestamp (0).
            to_time: End timestamp for range query, + can be used to express the maximum possible timestamp.
            filters: filter to match the time-series labels.
            count: Optional maximum number of returned results.
            aggregation_type: Optional aggregation type. Can be one of ['avg', 'sum', 'min', 'max', 'range', 'count',
                'first', 'last', 'std.p', 'std.s', 'var.p', 'var.s']
            bucket_size_msec: Time bucket for aggregation in milliseconds.
            with_labels: Include in the reply the label-value pairs that represent metadata labels of the time-series.
                If this argument is not set, by default, an empty Array will be replied on the labels array position.
            filter_by_ts: List of timestamps to filter the result by specific timestamps.
            filter_by_min_value: Filter result by minimum value (must mention also filter_by_max_value).
            filter_by_max_value: Filter result by maximum value (must mention also filter_by_min_value).
            groupby: Grouping by fields the results (must mention also reduce).
            reduce: Applying reducer functions on each group. Can be one of ['sum', 'min', 'max'].
            select_labels: Include in the reply only a subset of the key-value pair labels of a series.
            align: Timestamp for alignment control for aggregation.
        """
        params = self.__mrange_params(aggregation_type, bucket_size_msec, count, filters, from_time, to_time,
                                      with_labels, filter_by_ts, filter_by_min_value, filter_by_max_value,
                                      groupby, reduce, select_labels, align)

        return self.redis.execute_command(self.MREVRANGE_CMD, *params)

    def get(self, key):
        """Gets the last sample of ``key``."""
        return self.redis.execute_command(self.GET_CMD, key)

    def mget(self, filters, with_labels=False):
        """Get the last samples matching the specific ``filter``."""
        params = []
        self.appendWithLabels(params, with_labels)
        params.extend(['FILTER'])
        params += filters
        return self.redis.execute_command(self.MGET_CMD, *params)

    def info(self, key):
        """Gets information of ``key``."""
        return self.redis.execute_command(self.INFO_CMD, key)

    def queryindex(self, filters):
        """Get all the keys matching the ``filter`` list."""
        return self.redis.execute_command(self.QUERYINDEX_CMD, *filters)

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        Overridden in order to provide the right client through the pipeline.
        """
        p = Pipeline(
            connection_pool=self.redis.connection_pool,
            response_callbacks=self.redis.response_callbacks,
            transaction=transaction,
            shard_hint=shard_hint)
        # NOTE(review): the pipeline acts as its own ``redis`` handle so the
        # Client mixin methods queue commands on the pipeline itself.
        p.redis = p
        return p


class Pipeline(Pipeline, Client):
    "Pipeline for Redis TimeSeries Client"
--------------------------------------------------------------------------------
/test_commands.py:
--------------------------------------------------------------------------------
import unittest
import time
from time import sleep
from unittest import TestCase
from redistimeseries.client import Client as RedisTimeSeries
from redis import Redis

version = None
rts = None
port = 6379


class RedisTimeSeriesTest(TestCase):
    def setUp(self):
        global rts
        global version
        rts = RedisTimeSeries(port=port)
        rts.redis.flushdb()
        modules = rts.redis.execute_command("module", "list")
        if modules is not None:
            for module_info in modules:
                if module_info[1] == b'timeseries':
                    version = int(module_info[3])

    def testVersionRuntime(self):
        import redistimeseries as rts_pkg
        self.assertNotEqual("", rts_pkg.__version__)

    def testCreate(self):
        '''Test TS.CREATE calls'''
        self.assertTrue(rts.create(1))
        self.assertTrue(rts.create(2, retention_msecs=5))
        self.assertTrue(rts.create(3, labels={'Redis': 'Labs'}))
        self.assertTrue(rts.create(4, retention_msecs=20, labels={'Time': 'Series'}))
        info = rts.info(4)
        self.assertEqual(20, info.retention_msecs)
        self.assertEqual('Series', info.labels['Time'])

        if version is None or version < 14000:
            return

        # Test for a chunk size of 128 Bytes
        self.assertTrue(rts.create("time-serie-1", chunk_size=128))
        info = rts.info("time-serie-1")
        self.assertEqual(128, info.chunk_size)

        # Test for duplicate policy
        for duplicate_policy
in ["block", "last", "first", "min", "max"]:
            ts_name = "time-serie-ooo-{0}".format(duplicate_policy)
            self.assertTrue(rts.create(ts_name, duplicate_policy=duplicate_policy))
            info = rts.info(ts_name)
            self.assertEqual(duplicate_policy, info.duplicate_policy)

    def testAlter(self):
        '''Test TS.ALTER calls'''

        self.assertTrue(rts.create(1))
        self.assertEqual(0, rts.info(1).retention_msecs)
        self.assertTrue(rts.alter(1, retention_msecs=10))
        self.assertEqual({}, rts.info(1).labels)
        self.assertEqual(10, rts.info(1).retention_msecs)
        self.assertTrue(rts.alter(1, labels={'Time': 'Series'}))
        self.assertEqual('Series', rts.info(1).labels['Time'])
        self.assertEqual(10, rts.info(1).retention_msecs)
        pipe = rts.pipeline()
        self.assertTrue(pipe.create(2))

        if version is None or version < 14000:
            return
        info = rts.info(1)
        self.assertEqual(None, info.duplicate_policy)
        self.assertTrue(rts.alter(1, duplicate_policy='min'))
        info = rts.info(1)
        self.assertEqual('min', info.duplicate_policy)

    def testAdd(self):
        '''Test TS.ADD calls'''

        self.assertEqual(1, rts.add(1, 1, 1))
        self.assertEqual(2, rts.add(2, 2, 3, retention_msecs=10))
        self.assertEqual(3, rts.add(3, 3, 2, labels={'Redis': 'Labs'}))
        self.assertEqual(4, rts.add(4, 4, 2, retention_msecs=10, labels={'Redis': 'Labs', 'Time': 'Series'}))
        self.assertAlmostEqual(time.time(), float(rts.add(5, '*', 1)) / 1000, 2)

        info = rts.info(4)
        self.assertEqual(10, info.retention_msecs)
        self.assertEqual('Labs', info.labels['Redis'])

        if version is None or version < 14000:
            return

        # Test for a chunk size of 128 Bytes on TS.ADD
        self.assertTrue(rts.add("time-serie-1", 1, 10.0, chunk_size=128))
        info = rts.info("time-serie-1")
        self.assertEqual(128, info.chunk_size)

        # Test for duplicate policy BLOCK
        self.assertEqual(1, rts.add("time-serie-add-ooo-block", 1, 5.0))
        with self.assertRaises(Exception):
            rts.add("time-serie-add-ooo-block", 1, 5.0, duplicate_policy='block')

        # Test for duplicate policy LAST
        self.assertEqual(1, rts.add("time-serie-add-ooo-last", 1, 5.0))
        self.assertEqual(1, rts.add("time-serie-add-ooo-last", 1, 10.0, duplicate_policy='last'))
        self.assertEqual(10.0, rts.get("time-serie-add-ooo-last")[1])

        # Test for duplicate policy FIRST
        self.assertEqual(1, rts.add("time-serie-add-ooo-first", 1, 5.0))
        self.assertEqual(1, rts.add("time-serie-add-ooo-first", 1, 10.0, duplicate_policy='first'))
        self.assertEqual(5.0, rts.get("time-serie-add-ooo-first")[1])

        # Test for duplicate policy MAX
        self.assertEqual(1, rts.add("time-serie-add-ooo-max", 1, 5.0))
        self.assertEqual(1, rts.add("time-serie-add-ooo-max", 1, 10.0, duplicate_policy='max'))
        self.assertEqual(10.0, rts.get("time-serie-add-ooo-max")[1])

        # Test for duplicate policy MIN
        self.assertEqual(1, rts.add("time-serie-add-ooo-min", 1, 5.0))
        self.assertEqual(1, rts.add("time-serie-add-ooo-min", 1, 10.0, duplicate_policy='min'))
        self.assertEqual(5.0, rts.get("time-serie-add-ooo-min")[1])

    def testMAdd(self):
        '''Test TS.MADD calls'''

        rts.create('a')
        self.assertEqual([1, 2, 3], rts.madd([('a', 1, 5), ('a', 2, 10), ('a', 3, 15)]))

    def testIncrbyDecrby(self):
        '''Test TS.INCRBY and TS.DECRBY calls'''

        for _ in range(100):
            self.assertTrue(rts.incrby(1, 1))
            sleep(0.001)
        self.assertEqual(100, rts.get(1)[1])
        for _ in range(100):
            self.assertTrue(rts.decrby(1, 1))
            sleep(0.001)
        self.assertEqual(0, rts.get(1)[1])

        self.assertTrue(rts.incrby(2, 1.5, timestamp=5))
        self.assertEqual((5, 1.5), rts.get(2))
        self.assertTrue(rts.incrby(2, 2.25, timestamp=7))
        self.assertEqual((7, 3.75), rts.get(2))
        self.assertTrue(rts.decrby(2, 1.5, timestamp=15))
        self.assertEqual((15, 2.25), rts.get(2))
        if version is None or version < 14000:
            return

        # Test for a chunk size of 128 Bytes on TS.INCRBY
        self.assertTrue(rts.incrby("time-serie-1", 10, chunk_size=128))
        info = rts.info("time-serie-1")
        self.assertEqual(128, info.chunk_size)

        # Test for a chunk size of 128 Bytes on TS.DECRBY
        self.assertTrue(rts.decrby("time-serie-2", 10, chunk_size=128))
        info = rts.info("time-serie-2")
        self.assertEqual(128, info.chunk_size)

    def testDelRange(self):
        '''Test TS.DEL calls'''

        try:
            rts.delrange('test', 0, 100)
        except Exception as e:
            self.assertEqual("TSDB: the key does not exist", e.__str__())

        for i in range(100):
            rts.add(1, i, i % 7)
        self.assertEqual(22, rts.delrange(1, 0, 21))
        self.assertEqual([], rts.range(1, 0, 21))
        self.assertEqual([(22, 1.0)], rts.range(1, 22, 22))

    def testCreateRule(self):
        '''Test TS.CREATERULE and TS.DELETERULE calls'''

        # test rule creation
        # NOTE(review): local ``time`` shadows the imported time module for the
        # rest of this method -- intentional here, but easy to trip over.
        time = 100
        rts.create(1)
        rts.create(2)
        rts.createrule(1, 2, 'avg', 100)
        for i in range(50):
            rts.add(1, time + i * 2, 1)
            rts.add(1, time + i * 2 + 1, 2)
        rts.add(1, time * 2, 1.5)
        self.assertAlmostEqual(rts.get(2)[1], 1.5)
        info = rts.info(1)
        self.assertEqual(info.rules[0][1], 100)

        # test rule deletion
        rts.deleterule(1, 2)
        info = rts.info(1)
        self.assertFalse(info.rules)

    def testRange(self):
        '''Test TS.RANGE calls which returns range by key'''

        for i in range(100):
            rts.add(1, i, i % 7)
        self.assertEqual(100, len(rts.range(1, 0, 200)))
        for i in range(100):
            rts.add(1, i + 200, i % 7)
        self.assertEqual(200, len(rts.range(1, 0, 500)))
        # last sample isn't returned
        self.assertEqual(20, len(rts.range(1, 0, 500, aggregation_type='avg', bucket_size_msec=10)))
        self.assertEqual(10, len(rts.range(1, 0, 500, count=10)))
        self.assertEqual(2, len(rts.range(1, 0, 500, filter_by_ts=[i for i in range(10, 20)], filter_by_min_value=1,
                                          filter_by_max_value=2)))
        self.assertEqual([(0, 10.0), (10, 1.0)],
                         rts.range(1, 0, 10, aggregation_type='count', bucket_size_msec=10, align='+'))
        self.assertEqual([(0, 5.0), (5, 6.0)],
                         rts.range(1, 0, 10, aggregation_type='count', bucket_size_msec=10, align=5))

    def testRevRange(self):
        '''Test TS.REVRANGE calls which returns reverse range by key'''
        # TS.REVRANGE is available since RedisTimeSeries >= v1.4
        if version is None or version < 14000:
            return

        for i in range(100):
            rts.add(1, i, i % 7)
        self.assertEqual(100, len(rts.range(1, 0, 200)))
        for i in range(100):
            rts.add(1, i + 200, i % 7)
        self.assertEqual(200, len(rts.range(1, 0, 500)))
        # first sample isn't returned
        self.assertEqual(20, len(rts.revrange(1, 0, 500, aggregation_type='avg', bucket_size_msec=10)))
        self.assertEqual(10, len(rts.revrange(1, 0, 500, count=10)))
        self.assertEqual(2, len(rts.revrange(1, 0, 500, filter_by_ts=[i for i in range(10, 20)], filter_by_min_value=1,
                                             filter_by_max_value=2)))
        self.assertEqual([(10, 1.0), (0, 10.0)],
                         rts.revrange(1, 0, 10, aggregation_type='count', bucket_size_msec=10, align='+'))
        self.assertEqual([(1, 10.0), (0, 1.0)],
                         rts.revrange(1, 0, 10, aggregation_type='count', bucket_size_msec=10, align=1))

    def testMultiRange(self):
        '''Test TS.MRANGE calls which returns range by filter'''

        rts.create(1, labels={'Test': 'This', 'team': 'ny'})
        rts.create(2, labels={'Test': 'This', 'Taste': 'That', 'team': 'sf'})
        for i in range(100):
            rts.add(1, i, i % 7)
            rts.add(2, i, i % 11)

        res = rts.mrange(0, 200, filters=['Test=This'])
        self.assertEqual(2, len(res))
        self.assertEqual(100, len(res[0]['1'][1]))

        res = rts.mrange(0, 200, filters=['Test=This'], count=10)
        self.assertEqual(10, len(res[0]['1'][1]))

        for i in range(100):
            rts.add(1, i + 200, i % 7)
        res = rts.mrange(0, 500, filters=['Test=This'],
                         aggregation_type='avg', bucket_size_msec=10)
        self.assertEqual(2, len(res))
        self.assertEqual(20, len(res[0]['1'][1]))

        # test withlabels
        self.assertEqual({}, res[0]['1'][0])
        res = rts.mrange(0, 200, filters=['Test=This'], with_labels=True)
        self.assertEqual({'Test': 'This', 'team': 'ny'}, res[0]['1'][0])
        # test with selected labels
        res = rts.mrange(0, 200, filters=['Test=This'], select_labels=['team'])
        self.assertEqual({'team': 'ny'}, res[0]['1'][0])
        self.assertEqual({'team': 'sf'}, res[1]['2'][0])
        # test with filterby
        res = rts.mrange(0, 200, filters=['Test=This'], filter_by_ts=[i for i in range(10, 20)],
                         filter_by_min_value=1, filter_by_max_value=2)
        self.assertEqual([(15, 1.0), (16, 2.0)], res[0]['1'][1])
        # test groupby
        res = rts.mrange(0, 3, filters=['Test=This'], groupby='Test', reduce='sum')
        self.assertEqual([(0, 0.0), (1, 2.0), (2, 4.0), (3, 6.0)], res[0]['Test=This'][1])
        res = rts.mrange(0, 3, filters=['Test=This'], groupby='Test', reduce='max')
        self.assertEqual([(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)], res[0]['Test=This'][1])
        res = rts.mrange(0, 3, filters=['Test=This'], groupby='team', reduce='min')
        self.assertEqual(2, len(res))
        self.assertEqual([(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)], res[0]['team=ny'][1])
        self.assertEqual([(0, 0.0), (1, 1.0), (2, 2.0), (3, 3.0)], res[1]['team=sf'][1])
        # test align
        res = rts.mrange(0, 10, filters=['team=ny'], aggregation_type='count', bucket_size_msec=10, align='-')
        self.assertEqual([(0, 10.0), (10, 1.0)], res[0]['1'][1])
        res = rts.mrange(0, 10, filters=['team=ny'], aggregation_type='count', bucket_size_msec=10, align=5)
        self.assertEqual([(0, 5.0), (5, 6.0)], res[0]['1'][1])

    def testMultiReverseRange(self):
        '''Test TS.MREVRANGE calls which returns range by filter'''
        # TS.MREVRANGE is available since RedisTimeSeries >= v1.4
        if version is None or version < 14000:
            return

        rts.create(1, labels={'Test': 'This', 'team': 'ny'})
        rts.create(2, labels={'Test': 'This', 'Taste': 'That', 'team': 'sf'})
        for i in range(100):
            rts.add(1, i, i % 7)
            rts.add(2, i, i % 11)

        res = rts.mrange(0, 200, filters=['Test=This'])
        self.assertEqual(2, len(res))
        self.assertEqual(100, len(res[0]['1'][1]))

        res = rts.mrange(0, 200, filters=['Test=This'], count=10)
        self.assertEqual(10, len(res[0]['1'][1]))

        for i in range(100):
            rts.add(1, i + 200, i % 7)
        res = rts.mrevrange(0, 500, filters=['Test=This'],
                            aggregation_type='avg', bucket_size_msec=10)
        self.assertEqual(2, len(res))
        self.assertEqual(20, len(res[0]['1'][1]))

        # test withlabels
        self.assertEqual({}, res[0]['1'][0])
        res = rts.mrevrange(0, 200, filters=['Test=This'], with_labels=True)
        self.assertEqual({'Test': 'This', 'team': 'ny'}, res[0]['1'][0])
        # test with selected labels
        res = rts.mrevrange(0, 200, filters=['Test=This'], select_labels=['team'])
        self.assertEqual({'team': 'ny'}, res[0]['1'][0])
        self.assertEqual({'team': 'sf'}, res[1]['2'][0])
        # test filterby
        res = rts.mrevrange(0, 200, filters=['Test=This'], filter_by_ts=[i for i in range(10, 20)],
                            filter_by_min_value=1, filter_by_max_value=2)
        self.assertEqual([(16, 2.0), (15, 1.0)], res[0]['1'][1])
        # test groupby
        res = rts.mrevrange(0, 3, filters=['Test=This'], groupby='Test', reduce='sum')
        self.assertEqual([(3, 6.0), (2, 4.0), (1, 2.0), (0, 0.0)], res[0]['Test=This'][1])
        res = rts.mrevrange(0, 3, filters=['Test=This'], groupby='Test', reduce='max')
        self.assertEqual([(3, 3.0), (2, 2.0), (1, 1.0), (0, 0.0)], res[0]['Test=This'][1])
        res = rts.mrevrange(0, 3, filters=['Test=This'], groupby='team', reduce='min')
        self.assertEqual(2, len(res))
        self.assertEqual([(3, 3.0), (2, 2.0), (1, 1.0), (0, 0.0)], res[0]['team=ny'][1])
        self.assertEqual([(3, 3.0), (2, 2.0), (1, 1.0), (0, 0.0)], res[1]['team=sf'][1])
        # test align
        res = rts.mrevrange(0, 10, filters=['team=ny'], aggregation_type='count', bucket_size_msec=10, align='-')
        self.assertEqual([(10, 1.0), (0, 10.0)], res[0]['1'][1])
        res = rts.mrevrange(0, 10, filters=['team=ny'], aggregation_type='count', bucket_size_msec=10, align=1)
        self.assertEqual([(1, 10.0), (0, 1.0)], res[0]['1'][1])

    def testGet(self):
        '''Test TS.GET calls'''

        name = 'test'
        rts.create(name)
        self.assertEqual(None, rts.get(name))
        rts.add(name, 2, 3)
        self.assertEqual(2, rts.get(name)[0])
        rts.add(name, 3, 4)
        self.assertEqual(4, rts.get(name)[1])

    def testMGet(self):
        '''Test TS.MGET calls'''
        rts.create(1, labels={'Test': 'This'})
        rts.create(2, labels={'Test': 'This', 'Taste': 'That'})
        act_res = rts.mget(['Test=This'])
        exp_res = [{'1': [{}, None, None]}, {'2': [{}, None, None]}]
        self.assertEqual(act_res, exp_res)
        rts.add(1, '*', 15)
        rts.add(2, '*', 25)
        res = rts.mget(['Test=This'])
        self.assertEqual(15, res[0]['1'][2])
        self.assertEqual(25, res[1]['2'][2])
        res = rts.mget(['Taste=That'])
        self.assertEqual(25, res[0]['2'][2])

        # test with_labels
        self.assertEqual({}, res[0]['2'][0])
        res = rts.mget(['Taste=That'], with_labels=True)
        self.assertEqual({'Taste': 'That', 'Test': 'This'}, res[0]['2'][0])

    def testInfo(self):
        '''Test TS.INFO calls'''
        rts.create(1, retention_msecs=5, labels={'currentLabel': 'currentData'})
        info = rts.info(1)
        self.assertEqual(5, info.retention_msecs)
        self.assertEqual(info.labels['currentLabel'], 'currentData')
        if version is None or version < 14000:
            return
        self.assertEqual(None, info.duplicate_policy)

        rts.create('time-serie-2', duplicate_policy='min')
        info = rts.info('time-serie-2')
        self.assertEqual('min', info.duplicate_policy)

    def testQueryIndex(self):
        '''Test TS.QUERYINDEX calls'''
        rts.create(1, labels={'Test': 'This'})
        rts.create(2, labels={'Test': 'This', 'Taste': 'That'})
        self.assertEqual(2, len(rts.queryindex(['Test=This'])))
        self.assertEqual(1, len(rts.queryindex(['Taste=That'])))
        self.assertEqual(['2'], rts.queryindex(['Taste=That']))

    def testPipeline(self):
        '''Test pipeline'''
        pipeline = rts.pipeline()
        pipeline.create('with_pipeline')
        for i in range(100):
            pipeline.add('with_pipeline', i, 1.1 * i)
        pipeline.execute()

        info = rts.info('with_pipeline')
        self.assertEqual(info.lastTimeStamp, 99)
        self.assertEqual(info.total_samples, 100)
        self.assertEqual(rts.get('with_pipeline')[1], 99 * 1.1)

    def testUncompressed(self):
        '''Test uncompressed chunks'''
        rts.create('compressed')
        rts.create('uncompressed', uncompressed=True)
        compressed_info = rts.info('compressed')
        uncompressed_info = rts.info('uncompressed')
        self.assertNotEqual(compressed_info.memory_usage, uncompressed_info.memory_usage)

    def testPool(self):
        # Port 666 is deliberately bogus: the pre-built ``conn`` must win.
        redis = Redis(port=port)
        client = RedisTimeSeries(conn=redis, port=666)

        name = 'test'
        client.create(name)
        self.assertEqual(None, client.get(name))
        client.add(name, 2, 3)
        self.assertEqual(2, client.get(name)[0])
        info = client.info(name)
        self.assertEqual(1, info.total_samples)


if __name__ == '__main__':
    unittest.main()
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
skipsdist = True
envlist = linters

[flake8]
max-complexity = 10
ignore = E127,E265,E266,E301,E501
srcdir = redistimeseries
show-source = true
exclude =.git,.tox,dist,doc,*/__pycache__/*,*test*.py

[testenv:cover]
whitelist_externals = find
commands_pre =
    pip install --upgrade pip
setenv =
    REDIS_PORT = 6379
commands =
    coverage run test_commands.py
    codecov

[testenv:linters]
commands =
    # flake8 --show-source
    vulture redistimeseries --min-confidence 80
    bandit redistimeseries/**
--------------------------------------------------------------------------------