├── .circleci
└── config.yml
├── .flake8
├── .github
├── ISSUE_TEMPLATE
│ ├── BUG_REPORT.yml
│ ├── FEATURE_REQUEST.yml
│ └── SUPPORT.yml
├── PULL_REQUEST_TEMPLATE.md
├── dependabot.yml
└── workflows
│ ├── codeql-analysis.yml
│ ├── pylint.yml
│ ├── python-publish.yml
│ └── semantic.yml
├── .gitignore
├── .markdownlint.yml
├── CHANGELOG.md
├── Examples
├── __init__.py
├── basic_ssl_example.py
├── batching_example.py
├── cloud_dedicated_query.py
├── cloud_dedicated_write.py
├── community
│ ├── custom_url.py
│ └── database_transfer.py
├── config.py
├── example.csv
├── file-import
│ ├── csv_write.py
│ ├── feather_write.py
│ ├── json_write.py
│ ├── orc_write.py
│ ├── out.csv
│ ├── out.feather
│ ├── out.json
│ ├── out.orc
│ ├── out.parquet
│ ├── out_orig.csv
│ ├── out_orig.json
│ ├── parquet_write.py
│ └── write_file_parse_options.py
├── flight_options_example.py
├── handle_http_error.py
├── handle_query_error.py
├── pandas_write.py
├── pokemon-trainer
│ ├── basic-query.py
│ ├── basic-write-errorhandling.py
│ ├── basic-write-writeoptions.py
│ ├── basic-write.py
│ ├── cookbook.ipynb
│ ├── kanto.parquet
│ ├── pandas-write.py
│ ├── pokemon.csv
│ ├── write-batching-flight-calloptions.py
│ └── write-batching.py
├── query_async.py
└── query_type.py
├── LICENSE
├── README.md
├── docs
└── readme.md
├── influxdb_client_3
├── __init__.py
├── exceptions
│ ├── __init__.py
│ └── exceptions.py
├── py.typed
├── query
│ ├── __init__.py
│ └── query_api.py
├── read_file.py
├── version.py
└── write_client
│ ├── __init__.py
│ ├── _sync
│ ├── __init__.py
│ ├── api_client.py
│ └── rest.py
│ ├── client
│ ├── __init__.py
│ ├── _base.py
│ ├── influxdb_client.py
│ ├── logging_handler.py
│ ├── util
│ │ ├── __init__.py
│ │ ├── date_utils.py
│ │ ├── date_utils_pandas.py
│ │ ├── helpers.py
│ │ └── multiprocessing_helper.py
│ ├── warnings.py
│ ├── write
│ │ ├── __init__.py
│ │ ├── dataframe_serializer.py
│ │ ├── point.py
│ │ ├── polars_dataframe_serializer.py
│ │ └── retry.py
│ └── write_api.py
│ ├── configuration.py
│ ├── domain
│ ├── __init__.py
│ ├── write_precision.py
│ └── write_precision_converter.py
│ ├── extras.py
│ ├── rest.py
│ └── service
│ ├── __init__.py
│ ├── _base_service.py
│ ├── signin_service.py
│ ├── signout_service.py
│ └── write_service.py
├── pytest.ini
├── python-logo.png
├── setup.py
└── tests
├── __init__.py
├── data
└── iot.csv
├── test_api_client.py
├── test_dataframe_serializer.py
├── test_date_helper.py
├── test_deep_merge.py
├── test_influxdb_client_3.py
├── test_influxdb_client_3_integration.py
├── test_merge_options.py
├── test_point.py
├── test_polars.py
├── test_query.py
├── test_write_file.py
├── test_write_local_server.py
├── test_write_precision_converter.py
└── util
├── __init__.py
└── mocks.py
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2.1
2 |
3 | commands:
4 | client-test:
5 | description: "Run tests"
6 | parameters:
7 | python-image:
8 | type: string
9 | pytest-marker:
10 | type: string
11 | steps:
12 | - restore_cache:
13 | name: Restoring Pip Cache
14 | keys:
15 | - &cache-key pip-cache-v11-<< parameters.python-image >>-{{ checksum "setup.py" }}
16 | - pip-cache-v11-<< parameters.python-image >>-
17 | - run:
18 | name: "Running tests"
19 | command: |
20 | python --version
21 | mkdir test-reports || true
22 | pip install . --user
23 | pip install .\[dataframe\] --user
24 | pip install .\[test\] --user
25 | pytest -m "<< parameters.pytest-marker >>" tests --junitxml=test-reports/junit.xml --cov=./influxdb_client_3 --cov-report xml:coverage.xml
26 | - save_cache:
27 | name: Saving Pip Cache
28 | key: *cache-key
29 | paths:
30 | - ".venv"
31 | - "~/.cache/pip"
32 | - "/usr/local/lib/site-python"
33 | when: always
34 | jobs:
35 | tests-python:
36 | parameters:
37 | python-image:
38 | type: string
39 | default: &default-python "cimg/python:3.8"
40 | pytest-marker:
41 | type: string
42 | default: "not integration"
43 | docker:
44 | - image: << parameters.python-image >>
45 | environment:
46 | PIPENV_VENV_IN_PROJECT: true
47 | steps:
48 | - checkout
49 | - client-test:
50 | python-image: << parameters.python-image >>
51 | pytest-marker: << parameters.pytest-marker >>
52 | - store_test_results:
53 | path: test-reports
54 | - run:
55 | name: Collecting coverage reports
56 | command: |
57 | curl -Os https://uploader.codecov.io/latest/linux/codecov
58 | curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
59 | curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
60 | curl -s https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import
61 | gpgv codecov.SHA256SUM.sig codecov.SHA256SUM
62 | shasum -a 256 -c codecov.SHA256SUM
63 | chmod +x ./codecov
64 | ./codecov
65 | check-code-style:
66 | docker:
67 | - image: *default-python
68 | environment:
69 | PIPENV_VENV_IN_PROJECT: true
70 | steps:
71 | - checkout
72 | - run:
73 | name: Checks style consistency of setup.py.
74 | command: |
75 | pip install flake8 --user
76 | flake8 setup.py
77 | - run:
78 | name: Checks style consistency across sources.
79 | command: |
80 | pip install flake8 --user
81 | flake8 influxdb_client_3/
82 | - run:
83 | name: Checks style consistency across tests.
84 | command: |
85 | pip install flake8 --user
86 | flake8 tests/
87 | - run:
88 | name: Checks style consistency across examples.
89 | command: |
90 | pip install flake8 --user
91 | flake8 Examples/
92 | check-twine:
93 | docker:
94 | - image: *default-python
95 | environment:
96 | PIPENV_VENV_IN_PROJECT: true
97 | steps:
98 | - checkout
99 | - run:
100 | name: Checks that the description will render correctly on PyPI.
101 | command: |
102 | pip install --upgrade pip
103 | pip install 'twine>=5.1,<6.1' --user
104 | python setup.py sdist bdist_wheel
105 | twine check dist/*
106 | check-docstyle:
107 | docker:
108 | - image: *default-python
109 | environment:
110 | PIPENV_VENV_IN_PROJECT: true
111 | steps:
112 | - checkout
113 | - run:
114 | name: Checks compliance with Python docstring convention.
115 | command: |
116 | pip install pydocstyle --user
117 | pydocstyle --count influxdb_client_3
118 |
119 | workflows:
120 | version: 2
121 | build:
122 | when:
123 | not:
124 | equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
125 | jobs:
126 | - check-code-style
127 | # - check-docstyle
128 | - check-twine
129 | - tests-python:
130 | name: test-3.8
131 | python-image: "cimg/python:3.8"
132 | - tests-python:
133 | name: test-3.9
134 | python-image: "cimg/python:3.9"
135 | - tests-python:
136 | name: test-3.10
137 | python-image: "cimg/python:3.10"
138 | - tests-python:
139 | name: test-3.11
140 | python-image: "cimg/python:3.11"
141 | - tests-python:
142 | name: test-3.12
143 | python-image: "cimg/python:3.12"
144 | - tests-python:
145 | name: test-3.13
146 | python-image: "cimg/python:3.13"
147 | - tests-python:
148 | requires:
149 | - test-3.8
150 | - test-3.9
151 | - test-3.10
152 | - test-3.11
153 | - test-3.12
154 | - test-3.13
155 | name: test-integration
156 | python-image: *default-python
157 | pytest-marker: "integration"
158 |
159 | nightly:
160 | when:
161 | equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
162 | jobs:
163 | - tests-python
164 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | count = True
3 | max-line-length = 120
4 |
5 | # W504: Line break occurred after a binary operator
6 | ignore = W504
7 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/BUG_REPORT.yml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: Create a bug report to help us improve
3 | labels: ["bug"]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Thanks for taking time to fill out this bug report! We reserve this repository issues for bugs with reproducible problems.
9 | Please redirect any questions about the client usage to our [Community Slack](https://app.slack.com/huddle/TH8RGQX5Z/C02UDUPLQKA) or [Community Page](https://community.influxdata.com/) we have a lot of talented community members there who could help answer your question more quickly.
10 |
11 | * Please add a :+1: or comment on a similar existing bug report instead of opening a new one.
12 | * Please check whether the bug can be reproduced with the latest release.
13 | - type: textarea
14 | id: specifications
15 | attributes:
16 | label: Specifications
17 | description: Describe the steps to reproduce the bug.
18 | value: |
19 | * Client Version:
20 | * InfluxDB Version:
21 | * Platform:
22 | validations:
23 | required: true
24 | - type: textarea
25 | id: reproduce
26 | attributes:
27 | label: Code sample to reproduce problem
28 | description: Provide a code sample that reproduces the problem
29 | value: |
30 | ```python
31 | ```
32 | validations:
33 | required: true
34 | - type: textarea
35 | id: expected-behavior
36 | attributes:
37 | label: Expected behavior
38 | description: Describe what you expected to happen when you performed the above steps.
39 | validations:
40 | required: true
41 | - type: textarea
42 | id: actual-behavior
43 | attributes:
44 | label: Actual behavior
45 | description: Describe what actually happened when you performed the above steps.
46 | validations:
47 | required: true
48 | - type: textarea
49 | id: additional-info
50 | attributes:
51 | label: Additional info
52 | description: Include gist of relevant config, logs, etc.
53 | validations:
54 | required: false
55 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/FEATURE_REQUEST.yml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Create a feature request to make client more awesome
3 | labels: ["feature request"]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Thanks for taking time to share with us this feature request! Please describe why you would like this feature to be added to the client, how you plan to use it to make your life better.
9 | - type: textarea
10 | id: use-case
11 | attributes:
12 | label: Use Case
13 | description: Describe how you plan to use this feature.
14 | validations:
15 | required: true
16 | - type: textarea
17 | id: expected-behavior
18 | attributes:
19 | label: Expected behavior
20 | description: Describe what you expected to happen when you performed the above steps.
21 | validations:
22 | required: true
23 | - type: textarea
24 | id: actual-behavior
25 | attributes:
26 | label: Actual behavior
27 | description: Describe what actually happened when you performed the above steps.
28 | validations:
29 | required: true
30 | - type: textarea
31 | id: additional-info
32 | attributes:
33 | label: Additional info
34 | description: Include gist of relevant config, logs, etc.
35 | validations:
36 | required: false
37 |
38 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/SUPPORT.yml:
--------------------------------------------------------------------------------
1 | name: Support request
2 | description: Open a support request
3 | labels: ["support"]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | WOAHH, hold up. This isn't the best place for support questions.
9 | You can get a faster response on slack or forums:
10 |
11 | Please redirect any QUESTIONS about Client usage to
12 | - InfluxData Slack Channel: https://app.slack.com/huddle/TH8RGQX5Z/C02UDUPLQKA
13 | - InfluxData Community Site: https://community.influxdata.com
14 |
15 | - type: textarea
16 | attributes:
17 | label: "Please direct all support questions to slack or the forums. Thank you."
18 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | Closes #
2 |
3 | ## Proposed Changes
4 |
5 | _Briefly describe your proposed changes:_
6 |
7 | ## Checklist
8 |
9 |
10 |
11 | - [ ] CHANGELOG.md updated
12 | - [ ] Rebased/mergeable
13 | - [ ] A test has been added if appropriate
14 | - [ ] Tests pass
15 | - [ ] Commit messages are [conventional](https://www.conventionalcommits.org/en/v1.0.0/)
16 | - [ ] Sign [CLA](https://www.influxdata.com/legal/cla/) (if not already signed)
17 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "pip"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 | open-pull-requests-limit: 10
8 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | name: "CodeQL"
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | CodeQL-Build:
11 | runs-on: ubuntu-latest
12 |
13 | permissions:
14 | security-events: write
15 | actions: read
16 | contents: read
17 |
18 | steps:
19 | - name: Checkout repository
20 | uses: actions/checkout@v3
21 |
22 | - name: Initialize CodeQL
23 | uses: github/codeql-action/init@v2
24 | with:
25 | languages: python
26 |
27 | - name: Autobuild
28 | uses: github/codeql-action/autobuild@v2
29 |
30 | - name: Perform CodeQL Analysis
31 | uses: github/codeql-action/analyze@v2
32 |
--------------------------------------------------------------------------------
/.github/workflows/pylint.yml:
--------------------------------------------------------------------------------
1 | name: Pylint
2 |
3 | on: [workflow_dispatch]
4 |
5 | jobs:
6 | build:
7 | runs-on: ubuntu-latest
8 | strategy:
9 | matrix:
10 | python-version: ["3.8", "3.9", "3.10", "3.11"]
11 | steps:
12 | - uses: actions/checkout@v3
13 | - name: Set up Python ${{ matrix.python-version }}
14 | uses: actions/setup-python@v3
15 | with:
16 | python-version: ${{ matrix.python-version }}
17 | - name: Install dependencies
18 | run: |
19 | python -m pip install --upgrade pip
20 | pip install pylint
21 | - name: Analysing the code with pylint
22 | run: |
23 | pylint $(find influxdb_client_3 -name '*.py')
24 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package
36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
37 | with:
38 | user: __token__
39 | password: ${{ secrets.PYPI_API_TOKEN }}
40 |
--------------------------------------------------------------------------------
/.github/workflows/semantic.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: "Semantic PR and Commit Messages"
3 |
4 | on:
5 | pull_request:
6 | types: [opened, reopened, synchronize, edited]
7 | branches:
8 | - main
9 |
10 | jobs:
11 | semantic:
12 | uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main
13 | with:
14 | CHECK_PR_TITLE_OR_ONE_COMMIT: true
15 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | config.json
2 | __pycache__
3 | influxdb_client_3/__pycache__/
4 | .venv
5 | dist
6 | build
7 | pyinflux3*.egg-info
8 | .DS_Store
9 | __pycache__
10 | .idea
11 | *.egg-info/
12 | temp/
13 | test-reports/
14 | coverage.xml
15 | .coverage
16 |
--------------------------------------------------------------------------------
/.markdownlint.yml:
--------------------------------------------------------------------------------
1 | {
2 | "MD024": {
3 | "siblings_only": true
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | ## 0.14.0 [unreleased]
4 |
5 | ### Features
6 |
7 | 1. [#142](https://github.com/InfluxCommunity/influxdb3-python/pull/142): Support fast writes without waiting for WAL
8 | persistence:
9 | - New write option (`WriteOptions.no_sync`) added: `True` value means faster write but without the confirmation that
10 | the data was persisted. Default value: `False`.
11 | - **Supported by self-managed InfluxDB 3 Core and Enterprise servers only!**
12 | - Also configurable via environment variable (`INFLUX_WRITE_NO_SYNC`).
13 | - Long precision string values added from v3 HTTP API: `"nanosecond"`, `"microsecond"`, `"millisecond"`,
14 | `"second"` ( in addition to the existing `"ns"`, `"us"`, `"ms"`, `"s"`).
15 |
16 | ## 0.13.0 [2025-05-20]
17 |
18 | ### Features
19 |
20 | 1. [#130](https://github.com/InfluxCommunity/influxdb3-python/pull/130): Remove org parameters from the example code because they are not mandatory in InfluxDB 3
21 | 2. [#139](https://github.com/InfluxCommunity/influxdb3-python/pull/139): Supports environment variables with the same name as other clients
22 | 3. [#140](https://github.com/InfluxCommunity/influxdb3-python/pull/140): Query api will throw `InfluxdbClientQueryError` when receiving `ArrowException` from gRPC servers
23 |
24 | ## 0.12.0 [2025-03-26]
25 |
26 | ### Features
27 |
28 | 1. [#123](https://github.com/InfluxCommunity/influxdb3-python/pull/123): Introduces `query_async()` method. From this release the client now has a `query_async()` method that takes advantage of asyncio's event loop to run query calls in their own executor.
29 |
30 | For example:
31 | ```python
32 | table = await client.query_async(query)
33 | ```
34 |
35 | ### Bug Fixes
36 |
37 | 1. [#121](https://github.com/InfluxCommunity/influxdb3-python/pull/121): Fix use of arguments `verify_ssl` and `ssl_ca_cert` in `QueryApi`.
38 |
39 | ## 0.11.0 [2025-02-27]
40 |
41 | ### Bug Fixes
42 |
43 | 1. [#119](https://github.com/InfluxCommunity/influxdb3-python/pull/119): Fix use of `proxy` argument in client and query_api to use in channel solution for GRPC proxy.
44 |
45 | ## 0.10.0 [2024-11-27]
46 |
47 | ### Bug Fixes
48 |
49 | 1. [#113](https://github.com/InfluxCommunity/influxdb3-python/pull/113): Fix import error of `PolarsDataframeSerializer` in batching mode
50 |
51 | ## 0.9.0 [2024-09-13]
52 |
53 | ### Features
54 |
55 | 1. [#108](https://github.com/InfluxCommunity/influxdb3-python/pull/108): Better expose access to response headers in `InfluxDBError`. Example `handle_http_error` added.
56 | 2. [#112](https://github.com/InfluxCommunity/influxdb3-python/pull/112): Update batching examples, add integration tests of batching.
57 |
58 | ### Bug Fixes
59 |
60 | 1. [#107](https://github.com/InfluxCommunity/influxdb3-python/pull/107): Missing `py.typed` in distribution package
61 | 1. [#111](https://github.com/InfluxCommunity/influxdb3-python/pull/111): Reduce log level of disposal of batch processor to DEBUG
62 |
63 | ## 0.8.0 [2024-08-12]
64 |
65 | ### Features
66 |
67 | 1. [#101](https://github.com/InfluxCommunity/influxdb3-python/pull/101): Add support for InfluxDB Edge (OSS) authentication
68 |
69 | ### Bug Fixes
70 |
71 | 1. [#100](https://github.com/InfluxCommunity/influxdb3-python/pull/100): InfluxDB Edge (OSS) error handling
72 | 1. [#105](https://github.com/InfluxCommunity/influxdb3-python/pull/105): Importing Polars serialization module
73 |
74 | ## 0.7.0 [2024-07-11]
75 |
76 | ### Bug Fixes
77 |
78 | 1. [#95](https://github.com/InfluxCommunity/influxdb3-python/pull/95): `Polars` is optional dependency
79 | 1. [#99](https://github.com/InfluxCommunity/influxdb3-python/pull/99): Skip infinite values during serialization to line protocol
80 |
81 | ## 0.6.1 [2024-06-25]
82 |
83 | ### Bug Fixes
84 |
85 | 1. [#98](https://github.com/InfluxCommunity/influxdb3-python/pull/98): Missing declaration for `query` module
86 |
87 | ## 0.6.0 [2024-06-24]
88 |
89 | ### Features
90 |
91 | 1. [#89](https://github.com/InfluxCommunity/influxdb3-python/pull/89): Use `datetime.fromisoformat` over `dateutil.parse` in Python 3.11+
92 | 1. [#92](https://github.com/InfluxCommunity/influxdb3-python/pull/92): Update `user-agent` header value to `influxdb3-python/{VERSION}` and add it to queries as well.
93 |
94 | ### Bug Fixes
95 |
96 | 1. [#86](https://github.com/InfluxCommunity/influxdb3-python/pull/86): Refactor to `timezone` specific `datetime` helpers to avoid use deprecated functions
97 |
98 | ## 0.5.0 [2024-05-17]
99 |
100 | ### Features
101 |
102 | 1. [#88](https://github.com/InfluxCommunity/influxdb3-python/pull/88): Add support for named query parameters:
103 | ```python
104 | from influxdb_client_3 import InfluxDBClient3
105 |
106 | with InfluxDBClient3(host="https://us-east-1-1.aws.cloud2.influxdata.com",
107 | token="my-token",
108 | database="my-database") as client:
109 |
110 | table = client.query("select * from cpu where host=$host", query_parameters={"host": "server01"})
111 |
112 | print(table.to_pandas())
113 |
114 | ```
115 |
116 | ### Bug Fixes
117 |
118 | 1. [#87](https://github.com/InfluxCommunity/influxdb3-python/pull/87): Fix examples to use `write_options` instead of the object name `WriteOptions`
119 |
120 | ### Others
121 |
122 | 1. [#84](https://github.com/InfluxCommunity/influxdb3-python/pull/84): Enable packaging type information - `py.typed`
123 |
124 | ## 0.4.0 [2024-04-17]
125 |
126 | ### Bugfix
127 |
128 | 1. [#77](https://github.com/InfluxCommunity/influxdb3-python/pull/77): Support using pandas nullable types
129 |
130 | ### Others
131 |
132 | 1. [#80](https://github.com/InfluxCommunity/influxdb3-python/pull/80): Integrate code style check into CI
133 |
--------------------------------------------------------------------------------
/Examples/__init__.py:
--------------------------------------------------------------------------------
1 | # used mainly to resolve local utility helpers like config.py
2 |
--------------------------------------------------------------------------------
/Examples/basic_ssl_example.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | import pyarrow
5 |
6 | from config import Config
7 | from influxdb_client_3 import InfluxDBClient3
8 |
9 | bad_cert = """-----BEGIN CERTIFICATE-----
10 | MIIFDTCCAvWgAwIBAgIUYzpfisy9xLrhiZd+D9vOdzC3+iswDQYJKoZIhvcNAQEL
11 | BQAwFjEUMBIGA1UEAwwLdGVzdGhvc3QuaW8wHhcNMjUwMjI4MTM1NTMyWhcNMzUw
12 | MjI2MTM1NTMyWjAWMRQwEgYDVQQDDAt0ZXN0aG9zdC5pbzCCAiIwDQYJKoZIhvcN
13 | AQEBBQADggIPADCCAgoCggIBAN1lwqXYP8UMvjb56SpUEj2OpoEDRfLeWrEiHkOl
14 | xoymvJGaXZNEpDXo2TTdysCoYWEjz9IY6GlqSo2Yssf5BZkQwMOw7MdyRwCigzrh
15 | OAKbyCfsvEgfNFrXEdSDpaxW++5SToeErudYXc+sBfnI1NB4W3GBGqqIvx8fqaB3
16 | 1EU9ql2sKKxI0oYIQD/If9rQEyLFKeWdD8iT6YST1Vugkvd34NPmaqV5+pjdSb4z
17 | a8olavwUoslqFUeILqIq+WZZbOlgCcJYKcBAmELRnsxGaABRtMwMZx+0D+oKo4Kl
18 | QQtOcER+RHkBHyYFghZIBnzudfbP9NadknOz3AilJbJolXfXJqeQhRD8Ob49kkhe
19 | OwjAppHnaZGWjYZMLIfnwwXBwkS7bSwF16Wot83cpL46Xvg6xcl12An4JaoF798Q
20 | cXyYrWCgvbqjVR7694gxqLGzk138AKTDSbER1h1rfqCqkk7soE0oWCs7jiCk2XvD
21 | 49qVfHtd50KYJ4/yP1XL0PmLL0Hw1kvOxLVkFENc1zkoYXJRt2Ec6j9dajmGlsFn
22 | 0bLLap6UIlIGQFuvcLf4bvsIi9FICy2jBjaIdM4UAWbReG+52+180HEleAwi5bAN
23 | HY61WVXc4X+N0E2y8HWc1QaRioU7R4XZ5HXKs7OTWkKFZUU2JDFHAKdiiAU78qLU
24 | 7GApAgMBAAGjUzBRMB0GA1UdDgQWBBT2vPFo0mzh9ls4xJUiAgSK+B5LpTAfBgNV
25 | HSMEGDAWgBT2vPFo0mzh9ls4xJUiAgSK+B5LpTAPBgNVHRMBAf8EBTADAQH/MA0G
26 | CSqGSIb3DQEBCwUAA4ICAQC4TJNPx476qhiMi8anISv9lo9cnLju+qNhcz7wupBH
27 | 3Go6bVQ7TCbSt2QpAyY64mdnRqHsXeGvZXCnabOpeKRDeAPBtRjc6yNKuXybqFtn
28 | W3PZEs/OYc659TUA+MoBzSXYStN9yiiYXyVFqVn+Rw6kM9tKh0GgAU7f5P+8IGuR
29 | gXJbCjkbdJO7JUiVGEEmkjUHyqFxMHaZ8V6uazs52qIFyt7OYQTeV9HdoW8D9vAt
30 | GfzYwzRDzbsZeIJqqDzLe7NOyxEyqZHCbtNpGcOyaLOl7ZBS52WsqaUZtL+9PjqD
31 | 2TWj4WUFkOWQpTvWKHqM6//Buv4GjnTBShQKm+h+rxcGkdRMF6/sKwxPbr39P3RJ
32 | TMfJA3u5UuowT44VaA2jkQzqIbxH9+3EA+0qPbqPJchOSr0pHSncqvR9FYcr7ayN
33 | b6UDFnjeliyEqqksUO0arbvaO9FfB0kH8lU1NOKaQNO++Xj69GZMC6s721cNdad0
34 | qqcdtyXWeOBBchguYDrSUIgLnUTHEwwzOmcNQ36hO5eX282BJy3ZLT3JU6MJopjz
35 | vkbDDAxSrpZMcaoAWSrxgJAETeYiO4YbfORIzPkwdUkEIr6XY02Pi7MdkDGQ5hiB
36 | TavA8+oXRa4b9BR3bCWcg8S/t4uOTTLkeTcQbONPh5A5IRySLCU+CwqB+/+VlO8X
37 | Aw==
38 | -----END CERTIFICATE-----"""
39 |
40 |
41 | def write_cert(cert, file_name):
42 | f = open(file_name, "w")
43 | f.write(cert)
44 | f.close()
45 |
46 |
47 | def remove_cert(file_name):
48 | os.remove(file_name)
49 |
50 |
51 | def print_results(results: list):
52 | print("%-6s%-6s%-6s%-24s" % ("id", "speed", "ticks", "time"))
53 | for result in results:
54 | print("%-6s%-6.2f%-6i%-24s" % (result['id'], result['speed'], result['ticks'], result['time']))
55 |
56 |
57 | def main() -> None:
58 | print("Main")
59 | temp_cert_file = "temp_cert.pem"
60 | conf = Config()
61 |
62 | write_and_query_with_explicit_sys_cert(conf)
63 |
64 | write_cert(bad_cert, temp_cert_file)
65 | query_with_verify_ssl_off(conf, temp_cert_file)
66 | remove_cert(temp_cert_file)
67 |
68 |
69 | def write_and_query_with_explicit_sys_cert(conf):
70 | print("\nwrite and query with typical linux system cert\n")
71 | with InfluxDBClient3(token=conf.token,
72 | host=conf.host,
73 | database=conf.database,
74 | ssl_ca_cert="/etc/ssl/certs/ca-certificates.crt",
75 | verify_ssl=True) as _client:
76 | now = time.time_ns()
77 | lp = f"escooter,id=zx80 speed=3.14,ticks=42i {now - (10 * 1_000_000_000)}"
78 | _client.write(lp)
79 |
80 | query = "SELECT * FROM \"escooter\" ORDER BY time DESC"
81 | reader: pyarrow.Table = _client.query(query, mode="")
82 | print_results(reader.to_pylist())
83 |
84 |
85 | def query_with_verify_ssl_off(conf, cert):
86 | print("\nquerying with verify_ssl off\n")
87 |
88 | # Note that the passed root cert above is bad
89 | # Switch verify_ssl to True to throw SSL_ERROR_SSL
90 | with InfluxDBClient3(token=conf.token,
91 | host=conf.host,
92 | database=conf.database,
93 | ssl_ca_cert=cert,
94 | verify_ssl=False) as _client:
95 |
96 | query = "SELECT * FROM \"escooter\" ORDER BY time DESC"
97 | reader: pyarrow.Table = _client.query(query, mode="")
98 | print_results(reader.to_pylist())
99 |
100 |
101 | if __name__ == "__main__":
102 | main()
103 |
--------------------------------------------------------------------------------
/Examples/batching_example.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import random
3 | import time
4 |
5 | from bson import ObjectId
6 |
7 | import influxdb_client_3 as InfluxDBClient3
8 | from influxdb_client_3 import write_client_options, WritePrecision, WriteOptions, InfluxDBError
9 |
10 | from config import Config
11 |
12 |
13 | class BatchingCallback(object):
14 |
15 | def __init__(self):
16 | self.write_status_msg = None
17 | self.write_count = 0
18 | self.retry_count = 0
19 | self.start = time.time_ns()
20 |
21 | def success(self, conf, data: str):
22 | print(f"Written batch: {conf}, data: {data}")
23 | self.write_count += 1
24 | self.write_status_msg = f"SUCCESS: {self.write_count} writes"
25 |
26 | def error(self, conf, data: str, exception: InfluxDBError):
27 | print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
28 | self.write_status_msg = f"FAILURE - cause: {exception}"
29 |
30 | def retry(self, conf, data: str, exception: InfluxDBError):
31 | print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
32 | self.retry_count += 1
33 |
34 | def elapsed(self) -> int:
35 | return time.time_ns() - self.start
36 |
37 |
38 | def main() -> None:
39 | conf = Config()
40 |
41 | # Creating 100 gatewayId values as MongoDB ObjectIDs
42 | gatewayIds = [ObjectId() for x in range(0, 100)]
43 |
44 | # Setting decimal precision to 2
45 | precision = 2
46 |
47 | # Setting timestamp for first sensor reading
48 | sample_window_days = 7
49 | now = datetime.datetime.now()
50 | now = now - datetime.timedelta(days=sample_window_days)
51 | target_sample_count = sample_window_days * 24 * 60 * 6
52 |
53 | callback = BatchingCallback()
54 |
55 | write_options = WriteOptions(batch_size=5_000,
56 | flush_interval=10_000,
57 | jitter_interval=2_000,
58 | retry_interval=5_000,
59 | max_retries=5,
60 | max_retry_delay=30_000,
61 | max_close_wait=600_000,
62 | exponential_base=2)
63 |
64 | wco = write_client_options(success_callback=callback.success,
65 | error_callback=callback.error,
66 | retry_callback=callback.retry,
67 | write_options=write_options)
68 |
69 | # Opening InfluxDB client with a batch size of 5k points or flush interval
70 | # of 10k ms and gzip compression
71 | with InfluxDBClient3.InfluxDBClient3(token=conf.token,
72 | host=conf.host,
73 | database=conf.database,
74 | enable_gzip=True,
75 | write_client_options=wco) as _client:
76 | # Iterating over the full sample window (6 sensor readings per
77 | # minute per gateway, over sample_window_days days)
78 | print(f"Writing {target_sample_count} data points.")
79 | for i in range(0, target_sample_count):
80 | # Adding 10 seconds to timestamp of previous sensor reading
81 | now = now + datetime.timedelta(seconds=10)
82 | # Iterating over gateways
83 | for gatewayId in gatewayIds:
84 | # Creating random test data for 12 fields to be stored in
85 | # timeseries database
86 | bcW = random.randrange(1501)
87 | bcWh = round(random.uniform(0, 4.17), precision)
88 | bdW = random.randrange(71)
89 | bdWh = round(random.uniform(0, 0.12), precision)
90 | cPvWh = round(random.uniform(0.51, 27.78), precision)
91 | cW = random.randrange(172, 10001)
92 | cWh = round(random.uniform(0.51, 27.78), precision)
93 | eWh = round(random.uniform(0, 41.67), precision)
94 | iWh = round(random.uniform(0, 16.67), precision)
95 | pW = random.randrange(209, 20001)
96 | pWh = round(random.uniform(0.58, 55.56), precision)
97 | scWh = round(random.uniform(0.58, 55.56), precision)
98 | # Creating point to be ingested into InfluxDB
99 | p = InfluxDBClient3.Point("stream").tag(
100 | "gatewayId",
101 | str(gatewayId)).field(
102 | "bcW",
103 | bcW).field(
104 | "bcWh",
105 | bcWh).field(
106 | "bdW",
107 | bdW).field(
108 | "bdWh",
109 | bdWh).field(
110 | "cPvWh",
111 | cPvWh).field(
112 | "cW",
113 | cW).field(
114 | "cWh",
115 | cWh).field(
116 | "eWh",
117 | eWh).field(
118 | "iWh",
119 | iWh).field(
120 | "pW",
121 | pW).field(
122 | "pWh",
123 | pWh).field(
124 | "scWh",
125 | scWh).time(
126 | now.strftime('%Y-%m-%dT%H:%M:%SZ'),
127 | WritePrecision.S)
128 |
129 | # Writing point (InfluxDB automatically batches writes into sets of
130 | # 5k points)
131 | _client.write(record=p)
132 |
133 | print(callback.write_status_msg)
134 | print(f"Write retries: {callback.retry_count}")
135 | print(f"Wrote {target_sample_count} data points.")
136 | print(f"Elapsed time ms: {int(callback.elapsed() / 1_000_000)}")
137 |
138 |
# Run the example only when executed directly (not on import).
if __name__ == "__main__":
    main()
141 |
--------------------------------------------------------------------------------
/Examples/cloud_dedicated_query.py:
--------------------------------------------------------------------------------
from config import Config
import influxdb_client_3 as InfluxDBClient3

# Connection settings come from the environment (with defaults) via Config.
config = Config()

client = InfluxDBClient3.InfluxDBClient3(
    host=config.host,
    token=config.token,
    database=config.database)

# InfluxQL query over the last four hours of the "flight" measurement.
result = client.query(
    query="SELECT * FROM flight WHERE time > now() - 4h",
    language="influxql")

print(result.to_pandas())
16 |
--------------------------------------------------------------------------------
/Examples/cloud_dedicated_write.py:
--------------------------------------------------------------------------------
from config import Config
import influxdb_client_3 as InfluxDBClient3
from influxdb_client_3 import WriteOptions
import pandas as pd
import numpy as np

config = Config()

# Client-side batching: points are buffered and flushed in batches of 500
# or every 10 s, with jittered, exponentially backed-off retries.
write_options = WriteOptions(
    batch_size=500,
    flush_interval=10_000,
    jitter_interval=2_000,
    retry_interval=5_000,
    max_retries=5,
    max_retry_delay=30_000,
    max_close_wait=300_000,
    exponential_base=2,
    write_type='batching')

# Create a range of datetime values (the original example also built a small
# throwaway DataFrame here that was overwritten before use; removed as dead code).
dates = pd.date_range(start='2024-09-08', end='2024-09-09', freq='5min')

# Create a DataFrame with random data and datetime index
df = pd.DataFrame(
    np.random.randn(len(dates), 3),
    index=dates,
    columns=['Column 1', 'Column 2', 'Column 3'])
df['tagkey'] = 'Hello World'

print(df)

# Use the client as a context manager: with write_type='batching' the writes
# are buffered, so the client must be closed to flush pending points before
# the process exits (the original never closed the client).
with InfluxDBClient3.InfluxDBClient3(
        token=config.token,
        host=config.host,
        database=config.database,
        write_options=write_options) as client:
    # Write the DataFrame to InfluxDB
    client.write(df, data_frame_measurement_name='table',
                 data_frame_tag_columns=['tagkey'])
49 |
--------------------------------------------------------------------------------
/Examples/community/custom_url.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import pandas as pd
4 |
5 | from influxdb_client_3 import InfluxDBClient3, InfluxDBError, WriteOptions, write_client_options
6 |
7 |
class BatchingCallback(object):
    """Callbacks invoked by the write client to report batch outcomes."""

    def success(self, conf, data: str):
        # Batch acknowledged by the server.
        self._log(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        # Batch failed permanently (retries exhausted or non-retryable).
        self._log(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        # Batch failed with a retryable error; the client will try again.
        self._log(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")

    @staticmethod
    def _log(message: str):
        # Single output funnel so the reporting target is easy to change.
        print(message)
18 |
19 |
callback = BatchingCallback()

# Batch up to 100 points, flush at least every 10 s, retry with backoff.
write_options = WriteOptions(batch_size=100,
                             flush_interval=10_000,
                             jitter_interval=2_000,
                             retry_interval=5_000,
                             max_retries=5,
                             max_retry_delay=30_000,
                             exponential_base=2)

wco = write_client_options(success_callback=callback.success,
                           error_callback=callback.error,
                           retry_callback=callback.retry,
                           write_options=write_options
                           )

# Custom URL/ports: the host URL names port 442, but write and query traffic
# are redirected to port 443 via the *_port_overwrite arguments.
client = InfluxDBClient3(
    token="",
    host="https://eu-central-1-1.aws.cloud2.influxdata.com:442",
    database="pokemon-codex", enable_gzip=True, write_client_options=wco, write_port_overwrite=443,
    query_port_overwrite=443)

now = pd.Timestamp.now(tz='UTC').floor('ms')

# Lists of possible trainers
trainers = ["ash", "brock", "misty", "gary", "jessie", "james"]

# Read the CSV into a DataFrame
pokemon_df = pd.read_csv(
    "https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv")  # noqa: E501

# Creating an empty list to store the data
data = []

# Dictionary to keep track of the number of times each trainer has caught each Pokémon
trainer_pokemon_counts = {}

# Number of entries we want to create
num_entries = 1000

# Generating random data
for i in range(num_entries):
    trainer = random.choice(trainers)

    # Randomly select a row from pokemon_df
    random_pokemon = pokemon_df.sample().iloc[0]
    caught = random_pokemon['Name']

    # Count the number of times this trainer has caught this Pokémon
    # (dict.get replaces the original two-branch membership test).
    key = (trainer, caught)
    trainer_pokemon_counts[key] = trainer_pokemon_counts.get(key, 0) + 1

    # Get the number for this combination of trainer and Pokémon
    num = trainer_pokemon_counts[key]

    entry = {
        "trainer": trainer,
        # Zero-pad the Pokédex number to four digits; the original's
        # "0000 +" was a no-op (0000 == 0) and has been dropped.
        "id": f"{random_pokemon['#']:04d}",
        "num": str(num),
        "caught": caught,
        "level": random.randint(5, 20),
        "attack": random_pokemon['Attack'],
        "defense": random_pokemon['Defense'],
        "hp": random_pokemon['HP'],
        "speed": random_pokemon['Speed'],
        "type1": random_pokemon['Type 1'],
        "type2": random_pokemon['Type 2'],
        "timestamp": now
    }
    data.append(entry)

# Convert the list of dictionaries to a DataFrame
caught_pokemon_df = pd.DataFrame(data).set_index('timestamp')

# Print the DataFrame
print(caught_pokemon_df)

try:
    client.write(caught_pokemon_df, data_frame_measurement_name='caught',
                 data_frame_tag_columns=['trainer', 'id', 'num'])
except Exception as e:
    print(f"Error writing point: {e}")
finally:
    # The client batches writes; close() so any buffered points can be
    # flushed before the process exits (the original never closed it).
    client.close()
104 |
--------------------------------------------------------------------------------
/Examples/community/database_transfer.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import influxdb_client_3 as InfluxDBClient3
4 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError
5 |
6 |
class BatchingCallback(object):
    """Reports the outcome of each batched write to stdout."""

    def success(self, conf, data: str):
        # Server accepted the batch.
        self._emit(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        # Batch could not be written at all.
        self._emit(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        # Transient failure; the client will retry this batch.
        self._emit(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")

    @staticmethod
    def _emit(message: str):
        # Central print so output routing can be changed in one place.
        print(message)
17 |
18 |
# InfluxDB connection details
token = ""
dbfrom = "a"
dbto = "b"
url = "eu-central-1-1.aws.cloud2.influxdata.com"
measurement = "airSensors"
taglist = []

callback = BatchingCallback()

write_options = WriteOptions(batch_size=5_000,
                             flush_interval=10_000,
                             jitter_interval=2_000,
                             retry_interval=5_000,
                             max_retries=5,
                             max_retry_delay=30_000,
                             exponential_base=2)

wco = write_client_options(success_callback=callback.success,
                           error_callback=callback.error,
                           retry_callback=callback.retry,
                           write_options=write_options
                           )
# Opening InfluxDB client with a batch size of 5k points or flush interval
# of 10k ms and gzip compression
with InfluxDBClient3.InfluxDBClient3(token=token,
                                     host=url,
                                     enable_gzip=True, write_client_options=wco) as _client:
    # Discover the measurement's tag keys so they are preserved on rewrite.
    tags = _client.query(query=f"SHOW TAG KEYS FROM {measurement}",
                         language="influxql", database=dbfrom)
    taglist = tags.to_pydict()['tagKey']

    # Stream the source measurement chunk by chunk rather than all at once.
    reader = _client.query(query=f"SELECT * FROM {measurement}",
                           language="influxql", database=dbfrom, mode="chunk")
    try:
        while True:
            batch, buff = reader.read_chunk()
            print("batch:")
            # Renamed from `pd` in the original, which shadowed the
            # conventional pandas alias.
            frame = batch.to_pandas().set_index('time')
            print(frame)
            _client.write(database=dbto, record=frame, data_frame_measurement_name=measurement,
                          data_frame_tag_columns=taglist)
            time.sleep(2)
    except StopIteration:
        # read_chunk() raises StopIteration once the stream is exhausted.
        print("No more chunks to read")
66 |
--------------------------------------------------------------------------------
/Examples/config.py:
--------------------------------------------------------------------------------
import os
import json


class Config:
    """Connection settings from environment variables, with defaults."""

    def __init__(self):
        fallbacks = {
            'host': 'https://us-east-1-1.aws.cloud2.influxdata.com/',
            'token': 'my-token',
            'database': 'my-db',
        }
        for name, fallback in fallbacks.items():
            # `or` (rather than getenv's default argument) also replaces
            # empty-string values with the fallback.
            setattr(self, name, os.getenv(f'INFLUXDB_{name.upper()}') or fallback)

    def __str__(self):
        # JSON rendering of every setting — handy for debug logging.
        return json.dumps(self.__dict__)
13 |
--------------------------------------------------------------------------------
/Examples/file-import/csv_write.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import influxdb_client_3 as InfluxDBClient3
3 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError
4 |
5 |
class BatchingCallback(object):
    """Write callbacks that also count successfully written batches."""

    def __init__(self):
        # Incremented once per acknowledged batch; read by main() for the
        # final summary line.
        self.write_count = 0

    def success(self, conf, data: str):
        """Record and report a batch accepted by the server."""
        self.write_count = self.write_count + 1
        print(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        """Report a batch that failed with a non-retryable error."""
        print(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        """Report a batch failure that the client will retry."""
        print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
20 |
21 |
def main() -> None:
    """Batch-write ./out.csv to InfluxDB and report how many batches it took."""
    # allow detailed inspection
    logging.basicConfig(level=logging.DEBUG)

    callback = BatchingCallback()

    # Batching options are inlined into the client options here.
    wco = write_client_options(
        success_callback=callback.success,
        error_callback=callback.error,
        retry_callback=callback.retry,
        write_options=WriteOptions(batch_size=100,
                                   flush_interval=10_000,
                                   jitter_interval=2_000,
                                   retry_interval=5_000,
                                   max_retries=5,
                                   max_retry_delay=30_000,
                                   exponential_base=2))

    # token: access token generated in cloud
    # host: ATTN could be another AWS region or even another cloud provider
    # database: should have retention policy 'forever' to handle older sample data timestamps
    # write_client_options: see above
    # debug: allows low-level inspection of communications and context-manager termination
    with InfluxDBClient3.InfluxDBClient3(
            token="INSERT_TOKEN",
            host="https://us-east-1-1.aws.cloud2.influxdata.com/",
            database="example_data_forever",
            write_client_options=wco,
            debug=True) as client:
        client.write_file(
            file='./out.csv',
            timestamp_column='time', tag_columns=["provider", "machineID"])

    print(f'DONE writing from csv in {callback.write_count} batch(es)')
61 |
62 |
# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
65 |
--------------------------------------------------------------------------------
/Examples/file-import/feather_write.py:
--------------------------------------------------------------------------------
1 | import influxdb_client_3 as InfluxDBClient3
2 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError
3 |
4 |
class BatchingCallback(object):
    """Prints the outcome of each batched write."""

    def success(self, conf, data: str):
        # Batch landed successfully.
        self._report(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        # Batch failed and will not be retried.
        self._report(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        # Batch failed; the client schedules a retry.
        self._report(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")

    @staticmethod
    def _report(message: str):
        # One place to change if output should go elsewhere than stdout.
        print(message)
15 |
16 |
callback = BatchingCallback()

# Buffer up to 500 points, flushing at least every 10 s, with retries.
wco = write_client_options(
    success_callback=callback.success,
    error_callback=callback.error,
    retry_callback=callback.retry,
    write_options=WriteOptions(batch_size=500,
                               flush_interval=10_000,
                               jitter_interval=2_000,
                               retry_interval=5_000,
                               max_retries=5,
                               max_retry_delay=30_000,
                               exponential_base=2))

# Context manager presumably flushes buffered batches on exit — see the
# client's close semantics.
with InfluxDBClient3.InfluxDBClient3(
        token="INSERT_TOKEN",
        host="eu-central-1-1.aws.cloud2.influxdata.com",
        database="python", write_client_options=wco) as client:
    client.write_file(
        file='./out.feather',
        timestamp_column='time', tag_columns=["provider", "machineID"])
40 |
--------------------------------------------------------------------------------
/Examples/file-import/json_write.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import influxdb_client_3 as InfluxDBClient3
3 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError
4 |
5 |
class BatchingCallback(object):
    """Write callbacks that keep a running count of written batches."""

    def __init__(self):
        # Number of batches acknowledged so far; main() reports it at the end.
        self.write_count = 0

    def success(self, conf, data: str):
        """Count and report a batch accepted by the server."""
        self.write_count = self.write_count + 1
        print(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        """Report a permanently failed batch."""
        print(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        """Report a batch failure that will be retried."""
        print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
20 |
21 |
def main() -> None:
    """Batch-write ./out.json to InfluxDB and report how many batches it took."""
    # allow detailed inspection
    logging.basicConfig(level=logging.DEBUG)

    callback = BatchingCallback()

    wco = write_client_options(
        success_callback=callback.success,
        error_callback=callback.error,
        retry_callback=callback.retry,
        write_options=WriteOptions(batch_size=100,
                                   flush_interval=10_000,
                                   jitter_interval=2_000,
                                   retry_interval=5_000,
                                   max_retries=5,
                                   max_retry_delay=30_000,
                                   exponential_base=2))

    # token: access token generated in cloud
    # host: ATTN could be another AWS region or even another cloud provider
    # database: should have retention policy 'forever' to handle older sample data timestamps
    # write_client_options: see above
    # debug: allows low-level inspection of communications and context-manager termination
    with InfluxDBClient3.InfluxDBClient3(
            token="INSERT_TOKEN",
            host="https://us-east-1-1.aws.cloud2.influxdata.com/",
            database="example_data_forever",
            write_client_options=wco,
            debug=True) as client:
        # date_unit='ns' — presumably the timestamp resolution used when
        # parsing the JSON file; confirm against write_file docs.
        client.write_file(
            file='./out.json',
            timestamp_column='time',
            tag_columns=["provider", "machineID"],
            date_unit='ns')

    print(f"DONE writing from json in {callback.write_count} batch(es)")
62 |
63 |
# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
66 |
--------------------------------------------------------------------------------
/Examples/file-import/orc_write.py:
--------------------------------------------------------------------------------
1 | import influxdb_client_3 as InfluxDBClient3
2 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError
3 |
4 |
class BatchingCallback(object):
    """Reports batched-write outcomes to stdout."""

    def success(self, conf, data: str):
        # Batch written successfully.
        self._say(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        # Batch failed for good.
        self._say(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        # Batch failed but is retryable.
        self._say(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")

    @staticmethod
    def _say(message: str):
        # Funnel for all callback output.
        print(message)
15 |
16 |
callback = BatchingCallback()

write_options = WriteOptions(batch_size=500,
                             flush_interval=10_000,
                             jitter_interval=2_000,
                             retry_interval=5_000,
                             max_retries=5,
                             max_retry_delay=30_000,
                             exponential_base=2)

wco = write_client_options(success_callback=callback.success,
                           error_callback=callback.error,
                           retry_callback=callback.retry,
                           write_options=write_options
                           )

# FIX: pass write_client_options=wco. The original built `wco` but never
# used it, so the callbacks and batching options above had no effect —
# every sibling example (feather/parquet) passes it.
with InfluxDBClient3.InfluxDBClient3(
        token="INSERT_TOKEN",
        host="eu-central-1-1.aws.cloud2.influxdata.com",
        database="python", write_client_options=wco) as client:
    client.write_file(
        file='./out.orc',
        timestamp_column='time', tag_columns=["provider", "machineID"])
40 |
--------------------------------------------------------------------------------
/Examples/file-import/out.feather:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InfluxCommunity/influxdb3-python/7b9e8027abc22eeb9ce7236f46b68aa31b706c2b/Examples/file-import/out.feather
--------------------------------------------------------------------------------
/Examples/file-import/out.orc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InfluxCommunity/influxdb3-python/7b9e8027abc22eeb9ce7236f46b68aa31b706c2b/Examples/file-import/out.orc
--------------------------------------------------------------------------------
/Examples/file-import/out.parquet:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InfluxCommunity/influxdb3-python/7b9e8027abc22eeb9ce7236f46b68aa31b706c2b/Examples/file-import/out.parquet
--------------------------------------------------------------------------------
/Examples/file-import/parquet_write.py:
--------------------------------------------------------------------------------
1 | import influxdb_client_3 as InfluxDBClient3
2 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError
3 |
4 |
class BatchingCallback(object):
    """Stdout reporting for every batched-write outcome."""

    def success(self, conf, data: str):
        # Server accepted this batch.
        self._write(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        # This batch will not be written.
        self._write(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        # This batch will be attempted again.
        self._write(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")

    @staticmethod
    def _write(message: str):
        # All callback output goes through here.
        print(message)
15 |
16 |
callback = BatchingCallback()

# Batching configuration folded directly into the client options.
wco = write_client_options(
    success_callback=callback.success,
    error_callback=callback.error,
    retry_callback=callback.retry,
    write_options=WriteOptions(batch_size=500,
                               flush_interval=10_000,
                               jitter_interval=2_000,
                               retry_interval=5_000,
                               max_retries=5,
                               max_retry_delay=30_000,
                               exponential_base=2))

with InfluxDBClient3.InfluxDBClient3(
        token="INSERT_TOKEN",
        host="eu-central-1-1.aws.cloud2.influxdata.com",
        database="python",
        write_client_options=wco) as client:
    client.write_file(
        file='./out.parquet',
        timestamp_column='time', tag_columns=["provider", "machineID"])
41 |
--------------------------------------------------------------------------------
/Examples/file-import/write_file_parse_options.py:
--------------------------------------------------------------------------------
1 | import influxdb_client_3 as InfluxDBClient3
2 | from influxdb_client_3 import write_client_options, WriteOptions, InfluxDBError, file_parser_options
3 |
4 |
class BatchingCallback(object):
    """Logs the result of each batched write to stdout."""

    def success(self, conf, data: str):
        # Successful batch.
        self._out(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        # Unrecoverable batch failure.
        self._out(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        # Recoverable failure; a retry follows.
        self._out(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")

    @staticmethod
    def _out(message: str):
        # Shared output sink for the three callbacks.
        print(message)
15 |
16 |
callback = BatchingCallback()

wco = write_client_options(
    success_callback=callback.success,
    error_callback=callback.error,
    retry_callback=callback.retry,
    write_options=WriteOptions(batch_size=500,
                               flush_interval=10_000,
                               jitter_interval=2_000,
                               retry_interval=5_000,
                               max_retries=5,
                               max_retry_delay=30_000,
                               exponential_base=2))

with InfluxDBClient3.InfluxDBClient3(
        token="",
        host="eu-central-1-1.aws.cloud2.influxdata.com",
        database="python", write_client_options=wco) as client:
    # Restrict parsing to a subset of the file's columns.
    # NOTE(review): tag_columns below lists "provider", which is not among
    # the parsed columns — confirm this is intended.
    fpo = file_parser_options(columns=["time", "machineID", "vibration"])

    client.write_file(
        file='./out.parquet',
        timestamp_column='time', tag_columns=["provider", "machineID"], measurement_name='machine_data',
        file_parser_options=fpo)
43 |
--------------------------------------------------------------------------------
/Examples/flight_options_example.py:
--------------------------------------------------------------------------------
import influxdb_client_3 as InfluxDBClient3
from influxdb_client_3 import flight_client_options

# Read the root certificate used for the Flight TLS connection.
with open("./cert.pem", 'rb') as cert_file:
    cert = cert_file.read()
print(cert)

# Pass the certificate to the Flight client via flight_client_options.
client = InfluxDBClient3.InfluxDBClient3(
    token="",
    host="b0c7cce5-8dbc-428e-98c6-7f996fb96467.a.influxdb.io",
    database="flightdemo",
    flight_client_options=flight_client_options(
        tls_root_certs=cert))

# InfluxQL query over the last four hours of flight data.
result = client.query(
    query="SELECT * FROM flight WHERE time > now() - 4h",
    language="influxql")

print(result.to_pandas())
23 |
--------------------------------------------------------------------------------
/Examples/handle_http_error.py:
--------------------------------------------------------------------------------
1 | """
2 | Demonstrates handling response error headers on error.
3 | """
4 | import logging
5 | from config import Config
6 |
7 | import influxdb_client_3 as InfluxDBClient3
8 |
9 |
def main() -> None:
    """
    Trigger a write error and log the response's status, message and headers.
    :return:
    """
    config = Config()
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

    client = InfluxDBClient3.InfluxDBClient3(
        host=config.host,
        token=config.token,
        database=config.database
    )

    # write with empty field results in HTTP 400 error
    # Other cases might be HTTP 503 or HTTP 429 too many requests
    lp = 'drone,location=harfa,id=A16E22 speed=18.7,alt=97.6,shutter='

    try:
        client.write(lp)
    except InfluxDBClient3.InfluxDBError as idberr:
        logging.log(logging.ERROR, 'WRITE ERROR: %s (%s)',
                    idberr.response.status,
                    idberr.message)
        # Render every response header on its own indented line.
        headers = idberr.getheaders()
        header_lines = [f'  {name}: {headers[name]}\n' for name in headers]
        logging.log(logging.INFO, 'Response Headers:\n' + ''.join(header_lines))
39 |
40 |
# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
43 |
--------------------------------------------------------------------------------
/Examples/handle_query_error.py:
--------------------------------------------------------------------------------
1 | """
2 | Demonstrates handling error when querying InfluxDB.
3 | """
4 | import logging
5 | from config import Config
6 | from influxdb_client_3.exceptions import InfluxDB3ClientQueryError
7 |
8 | import influxdb_client_3 as InfluxDBClient3
9 |
10 |
def main() -> None:
    """
    Run a failing query and log the client's error message.
    :return:
    """
    config = Config()
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

    client = InfluxDBClient3.InfluxDBClient3(
        host=config.host,
        token=config.token,
        database=config.database
    )

    try:
        # Select from a bucket that doesn't exist
        client.query("Select a from cpu11")
    except InfluxDB3ClientQueryError as query_err:
        logging.log(logging.ERROR, query_err.message)
30 |
31 |
# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
34 |
--------------------------------------------------------------------------------
/Examples/pandas_write.py:
--------------------------------------------------------------------------------
import influxdb_client_3 as InfluxDBClient3
import pandas as pd
import numpy as np

client = InfluxDBClient3.InfluxDBClient3(
    token="",
    host="eu-central-1-1.aws.cloud2.influxdata.com",
    database="")

# Create a range of datetime values.
# (The original example also built a small {'a': ..., 'b': ...} DataFrame
# here that was overwritten before ever being used — removed as dead code.)
dates = pd.date_range(start='2023-03-01', end='2023-03-29', freq='5min')

# Create a DataFrame with random data and datetime index
df = pd.DataFrame(
    np.random.randn(len(dates), 3),
    index=dates,
    columns=['Column 1', 'Column 2', 'Column 3'])
df['tagkey'] = 'Hello World'

print(df)

# Write the DataFrame to InfluxDB
client.write(df, data_frame_measurement_name='table',
             data_frame_tag_columns=['tagkey'])
35 |
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/basic-query.py:
--------------------------------------------------------------------------------
from influxdb_client_3 import InfluxDBClient3

client = InfluxDBClient3(
    token="",
    host="eu-central-1-1.aws.cloud2.influxdata.com",
    database="pokemon-codex")

# Same data queried twice: once via SQL (mode='all'), once via InfluxQL
# (mode='pandas'); each result is printed as returned.
sql = '''SELECT * FROM caught WHERE trainer = 'ash' AND time >= now() - interval '1 hour' LIMIT 5'''
print(client.query(query=sql, language='sql', mode='all'))

influxql = '''SELECT * FROM caught WHERE trainer = 'ash' AND time > now() - 1h LIMIT 5'''
print(client.query(query=influxql, language='influxql', mode='pandas'))
15 |
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/basic-write-errorhandling.py:
--------------------------------------------------------------------------------
import datetime

from influxdb_client_3 import InfluxDBClient3, Point, SYNCHRONOUS, write_client_options

wco = write_client_options(write_options=SYNCHRONOUS)

with InfluxDBClient3(
        token="",
        host="eu-central-1-1.aws.cloud2.influxdata.com",
        database="pokemon-codex", write_client_options=wco) as client:
    now = datetime.datetime.now(datetime.timezone.utc)

    # (The original built a single charizard Point here and immediately
    # overwrote it with the list below — removed as dead code.)
    data = []
    # Adding first point
    data.append(
        Point("caught")
        .tag("trainer", "ash")
        .tag("id", "0006")
        .tag("num", "1")
        .field("caught", "charizard")
        .field("level", 10)
        .field("attack", 30)
        .field("defense", 40)
        .field("hp", 200)
        .field("speed", 10)
        .field("type1", "fire")
        .field("type2", "flying")
        .time(now)
    )

    # Bad point — type2 is None; presumably this makes the write fail,
    # which is what this example demonstrates.
    data.append(
        Point("caught")
        .tag("trainer", "ash")
        .tag("id", "0008")
        .tag("num", "3")
        .field("caught", "squirtle")
        .field("level", 13)
        .field("attack", 29)
        .field("defense", 40)
        .field("hp", 180)
        .field("speed", 13)
        .field("type1", "water")
        .field("type2", None)
        .time(now)
    )

    try:
        client.write(data)
    except Exception as e:
        print(f"Error writing point: {e}")

    # Good Query
    try:
        table = client.query(query='''SELECT * FROM "caught" WHERE time > now() - 5m''', language='influxql')
        print(table)
    except Exception as e:
        print(f"Error querying data: {e}")

    # Bad Query - not a sql query
    try:
        table = client.query(query='''SELECT * FROM "caught" WHERE time > now() - 5m''', language='sql')
        print(table)
    except Exception as e:
        print(f"Error querying data: {e}")
74 |
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/basic-write-writeoptions.py:
--------------------------------------------------------------------------------
import datetime

from influxdb_client_3 import InfluxDBClient3, Point, SYNCHRONOUS, write_client_options


def _caught_point(ts, id_, num, name, level, attack, defense, hp, speed, type1, type2):
    # Build one "caught" point for trainer ash with the given stats.
    return (Point("caught")
            .tag("trainer", "ash")
            .tag("id", id_)
            .tag("num", num)
            .field("caught", name)
            .field("level", level)
            .field("attack", attack)
            .field("defense", defense)
            .field("hp", hp)
            .field("speed", speed)
            .field("type1", type1)
            .field("type2", type2)
            .time(ts))


wco = write_client_options(write_options=SYNCHRONOUS)

with InfluxDBClient3(
        token="",
        host="eu-central-1-1.aws.cloud2.influxdata.com",
        database="pokemon-codex",
        write_client_options=wco,
        debug=True) as client:
    now = datetime.datetime.now(datetime.timezone.utc)

    # Single point, written on its own first.
    data = _caught_point(now, "0006", "1", "charizard", 10, 30, 40, 200, 10, "fire", "flying")

    try:
        client.write(data)
    except Exception as e:
        print(f"Error writing point: {e}")

    # Three points written in one batch (the last has type2=None).
    data = [
        _caught_point(now, "0006", "1", "charizard", 10, 30, 40, 200, 10, "fire", "flying"),
        _caught_point(now, "0007", "2", "bulbasaur", 12, 31, 31, 190, 11, "grass", "poison"),
        _caught_point(now, "0008", "3", "squirtle", 13, 29, 40, 180, 13, "water", None),
    ]

    try:
        client.write(data)
    except Exception as e:
        print(f"Error writing point: {e}")
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/basic-write.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from influxdb_client_3 import InfluxDBClient3, Point
4 |
5 | client = InfluxDBClient3(
6 | token="mGbL-OJ2kxYqvbIL9jQOOg2VJLhf16hh-xn-XJe3RUKrI5cewOAy80L5cVIzG0vh7dLLckZkpYfvExgoMBXLFA==",
7 | host="eu-central-1-1.aws.cloud2.influxdata.com",
8 | database="pokemon-codex")
9 |
10 | now = datetime.datetime.now(datetime.timezone.utc)
11 |
12 | data = Point("caught").tag("trainer", "ash").tag("id", "0006").tag("num", "1") \
13 | .field("caught", "charizard") \
14 | .field("level", 10).field("attack", 30) \
15 | .field("defense", 40).field("hp", 200) \
16 | .field("speed", 10) \
17 | .field("type1", "fire").field("type2", "flying") \
18 | .time(now)
19 |
20 | try:
21 | client.write(data)
22 | except Exception as e:
23 | print(f"Error writing point: {e}")
24 |
25 | data = []
26 | # Adding first point
27 | data.append(
28 | Point("caught")
29 | .tag("trainer", "ash")
30 | .tag("id", "0006")
31 | .tag("num", "1")
32 | .field("caught", "charizard")
33 | .field("level", 10)
34 | .field("attack", 30)
35 | .field("defense", 40)
36 | .field("hp", 200)
37 | .field("speed", 10)
38 | .field("type1", "fire")
39 | .field("type2", "flying")
40 | .time(now)
41 | )
42 |
43 | # Adding second point
44 | data.append(
45 | Point("caught")
46 | .tag("trainer", "ash")
47 | .tag("id", "0007")
48 | .tag("num", "2")
49 | .field("caught", "bulbasaur")
50 | .field("level", 12)
51 | .field("attack", 31)
52 | .field("defense", 31)
53 | .field("hp", 190)
54 | .field("speed", 11)
55 | .field("type1", "grass")
56 | .field("type2", "poison")
57 | .time(now)
58 | )
59 |
60 | # Adding third point
61 | data.append(
62 | Point("caught")
63 | .tag("trainer", "ash")
64 | .tag("id", "0008")
65 | .tag("num", "3")
66 | .field("caught", "squirtle")
67 | .field("level", 13)
68 | .field("attack", 29)
69 | .field("defense", 40)
70 | .field("hp", 180)
71 | .field("speed", 13)
72 | .field("type1", "water")
73 | .field("type2", None)
74 | .time(now)
75 | )
76 |
77 | try:
78 | client.write(data)
79 | except Exception as e:
80 | print(f"Error writing point: {e}")
81 |
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/kanto.parquet:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InfluxCommunity/influxdb3-python/7b9e8027abc22eeb9ce7236f46b68aa31b706c2b/Examples/pokemon-trainer/kanto.parquet
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/pandas-write.py:
--------------------------------------------------------------------------------
import random

import pandas as pd

from influxdb_client_3 import InfluxDBClient3

# Supply your own token; never commit a real credential.
client = InfluxDBClient3(
    token="",
    host="eu-central-1-1.aws.cloud2.influxdata.com",
    database="pokemon-codex")

# Millisecond-floored UTC timestamp shared by every generated row.
now = pd.Timestamp.now(tz='UTC').floor('ms')

# Lists of possible trainers
trainers = ["ash", "brock", "misty", "gary", "jessie", "james"]

# Read the CSV into a DataFrame
pokemon_df = pd.read_csv(
    "https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv")  # noqa: E501

# Rows accumulated here become the DataFrame written below.
data = []

# (trainer, pokemon) -> number of times that trainer has caught that Pokémon.
trainer_pokemon_counts = {}

# Number of entries we want to create
num_entries = 100

# Generating random data
for _ in range(num_entries):
    trainer = random.choice(trainers)

    # Randomly select a row from pokemon_df
    random_pokemon = pokemon_df.sample().iloc[0]
    caught = random_pokemon['Name']

    # Bump the per-(trainer, Pokémon) catch counter.
    # dict.get collapses the original if/else bookkeeping into one line.
    key = (trainer, caught)
    num = trainer_pokemon_counts.get(key, 0) + 1
    trainer_pokemon_counts[key] = num

    entry = {
        "trainer": trainer,
        # The pokedex number is the id, zero-padded to four digits
        # (the original's "0000 +" added nothing).
        "id": f"{random_pokemon['#']:04d}",
        "num": str(num),
        "caught": caught,
        "level": random.randint(5, 20),
        "attack": random_pokemon['Attack'],
        "defense": random_pokemon['Defense'],
        "hp": random_pokemon['HP'],
        "speed": random_pokemon['Speed'],
        "type1": random_pokemon['Type 1'],
        "type2": random_pokemon['Type 2'],
        "timestamp": now
    }
    data.append(entry)

# Convert the list of dictionaries to a DataFrame, indexed by timestamp so the
# client can use it as the time column.
caught_pokemon_df = pd.DataFrame(data).set_index('timestamp')

# Print the DataFrame
print(caught_pokemon_df)

try:
    client.write(caught_pokemon_df, data_frame_measurement_name='caught',
                 data_frame_tag_columns=['trainer', 'id', 'num'])
except Exception as e:
    print(f"Error writing point: {e}")
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/pokemon.csv:
--------------------------------------------------------------------------------
1 | #,Name,Type 1,Type 2,Total,HP,Attack,Defense,Sp. Atk,Sp. Def,Speed,Generation,Legendary
2 | 1,Bulbasaur,Grass,Poison,318,45,49,49,65,65,45,1,False
3 | 2,Ivysaur,Grass,Poison,405,60,62,63,80,80,60,1,False
4 | 3,Venusaur,Grass,Poison,525,80,82,83,100,100,80,1,False
5 | 3,VenusaurMega Venusaur,Grass,Poison,625,80,100,123,122,120,80,1,False
6 | 4,Charmander,Fire,,309,39,52,43,60,50,65,1,False
7 | 5,Charmeleon,Fire,,405,58,64,58,80,65,80,1,False
8 | 6,Charizard,Fire,Flying,534,78,84,78,109,85,100,1,False
9 | 6,CharizardMega Charizard X,Fire,Dragon,634,78,130,111,130,85,100,1,False
10 | 6,CharizardMega Charizard Y,Fire,Flying,634,78,104,78,159,115,100,1,False
11 | 7,Squirtle,Water,,314,44,48,65,50,64,43,1,False
12 | 8,Wartortle,Water,,405,59,63,80,65,80,58,1,False
13 | 9,Blastoise,Water,,530,79,83,100,85,105,78,1,False
14 | 9,BlastoiseMega Blastoise,Water,,630,79,103,120,135,115,78,1,False
15 | 10,Caterpie,Bug,,195,45,30,35,20,20,45,1,False
16 | 11,Metapod,Bug,,205,50,20,55,25,25,30,1,False
17 | 12,Butterfree,Bug,Flying,395,60,45,50,90,80,70,1,False
18 | 13,Weedle,Bug,Poison,195,40,35,30,20,20,50,1,False
19 | 14,Kakuna,Bug,Poison,205,45,25,50,25,25,35,1,False
20 | 15,Beedrill,Bug,Poison,395,65,90,40,45,80,75,1,False
21 | 15,BeedrillMega Beedrill,Bug,Poison,495,65,150,40,15,80,145,1,False
22 | 16,Pidgey,Normal,Flying,251,40,45,40,35,35,56,1,False
23 | 17,Pidgeotto,Normal,Flying,349,63,60,55,50,50,71,1,False
24 | 18,Pidgeot,Normal,Flying,479,83,80,75,70,70,101,1,False
25 | 18,PidgeotMega Pidgeot,Normal,Flying,579,83,80,80,135,80,121,1,False
26 | 19,Rattata,Normal,,253,30,56,35,25,35,72,1,False
27 | 20,Raticate,Normal,,413,55,81,60,50,70,97,1,False
28 | 21,Spearow,Normal,Flying,262,40,60,30,31,31,70,1,False
29 | 22,Fearow,Normal,Flying,442,65,90,65,61,61,100,1,False
30 | 23,Ekans,Poison,,288,35,60,44,40,54,55,1,False
31 | 24,Arbok,Poison,,438,60,85,69,65,79,80,1,False
32 | 25,Pikachu,Electric,,320,35,55,40,50,50,90,1,False
33 | 26,Raichu,Electric,,485,60,90,55,90,80,110,1,False
34 | 27,Sandshrew,Ground,,300,50,75,85,20,30,40,1,False
35 | 28,Sandslash,Ground,,450,75,100,110,45,55,65,1,False
36 | 29,Nidoran♀,Poison,,275,55,47,52,40,40,41,1,False
37 | 30,Nidorina,Poison,,365,70,62,67,55,55,56,1,False
38 | 31,Nidoqueen,Poison,Ground,505,90,92,87,75,85,76,1,False
39 | 32,Nidoran♂,Poison,,273,46,57,40,40,40,50,1,False
40 | 33,Nidorino,Poison,,365,61,72,57,55,55,65,1,False
41 | 34,Nidoking,Poison,Ground,505,81,102,77,85,75,85,1,False
42 | 35,Clefairy,Fairy,,323,70,45,48,60,65,35,1,False
43 | 36,Clefable,Fairy,,483,95,70,73,95,90,60,1,False
44 | 37,Vulpix,Fire,,299,38,41,40,50,65,65,1,False
45 | 38,Ninetales,Fire,,505,73,76,75,81,100,100,1,False
46 | 39,Jigglypuff,Normal,Fairy,270,115,45,20,45,25,20,1,False
47 | 40,Wigglytuff,Normal,Fairy,435,140,70,45,85,50,45,1,False
48 | 41,Zubat,Poison,Flying,245,40,45,35,30,40,55,1,False
49 | 42,Golbat,Poison,Flying,455,75,80,70,65,75,90,1,False
50 | 43,Oddish,Grass,Poison,320,45,50,55,75,65,30,1,False
51 | 44,Gloom,Grass,Poison,395,60,65,70,85,75,40,1,False
52 | 45,Vileplume,Grass,Poison,490,75,80,85,110,90,50,1,False
53 | 46,Paras,Bug,Grass,285,35,70,55,45,55,25,1,False
54 | 47,Parasect,Bug,Grass,405,60,95,80,60,80,30,1,False
55 | 48,Venonat,Bug,Poison,305,60,55,50,40,55,45,1,False
56 | 49,Venomoth,Bug,Poison,450,70,65,60,90,75,90,1,False
57 | 50,Diglett,Ground,,265,10,55,25,35,45,95,1,False
58 | 51,Dugtrio,Ground,,405,35,80,50,50,70,120,1,False
59 | 52,Meowth,Normal,,290,40,45,35,40,40,90,1,False
60 | 53,Persian,Normal,,440,65,70,60,65,65,115,1,False
61 | 54,Psyduck,Water,,320,50,52,48,65,50,55,1,False
62 | 55,Golduck,Water,,500,80,82,78,95,80,85,1,False
63 | 56,Mankey,Fighting,,305,40,80,35,35,45,70,1,False
64 | 57,Primeape,Fighting,,455,65,105,60,60,70,95,1,False
65 | 58,Growlithe,Fire,,350,55,70,45,70,50,60,1,False
66 | 59,Arcanine,Fire,,555,90,110,80,100,80,95,1,False
67 | 60,Poliwag,Water,,300,40,50,40,40,40,90,1,False
68 | 61,Poliwhirl,Water,,385,65,65,65,50,50,90,1,False
69 | 62,Poliwrath,Water,Fighting,510,90,95,95,70,90,70,1,False
70 | 63,Abra,Psychic,,310,25,20,15,105,55,90,1,False
71 | 64,Kadabra,Psychic,,400,40,35,30,120,70,105,1,False
72 | 65,Alakazam,Psychic,,500,55,50,45,135,95,120,1,False
73 | 65,AlakazamMega Alakazam,Psychic,,590,55,50,65,175,95,150,1,False
74 | 66,Machop,Fighting,,305,70,80,50,35,35,35,1,False
75 | 67,Machoke,Fighting,,405,80,100,70,50,60,45,1,False
76 | 68,Machamp,Fighting,,505,90,130,80,65,85,55,1,False
77 | 69,Bellsprout,Grass,Poison,300,50,75,35,70,30,40,1,False
78 | 70,Weepinbell,Grass,Poison,390,65,90,50,85,45,55,1,False
79 | 71,Victreebel,Grass,Poison,490,80,105,65,100,70,70,1,False
80 | 72,Tentacool,Water,Poison,335,40,40,35,50,100,70,1,False
81 | 73,Tentacruel,Water,Poison,515,80,70,65,80,120,100,1,False
82 | 74,Geodude,Rock,Ground,300,40,80,100,30,30,20,1,False
83 | 75,Graveler,Rock,Ground,390,55,95,115,45,45,35,1,False
84 | 76,Golem,Rock,Ground,495,80,120,130,55,65,45,1,False
85 | 77,Ponyta,Fire,,410,50,85,55,65,65,90,1,False
86 | 78,Rapidash,Fire,,500,65,100,70,80,80,105,1,False
87 | 79,Slowpoke,Water,Psychic,315,90,65,65,40,40,15,1,False
88 | 80,Slowbro,Water,Psychic,490,95,75,110,100,80,30,1,False
89 | 80,SlowbroMega Slowbro,Water,Psychic,590,95,75,180,130,80,30,1,False
90 | 81,Magnemite,Electric,Steel,325,25,35,70,95,55,45,1,False
91 | 82,Magneton,Electric,Steel,465,50,60,95,120,70,70,1,False
92 | 83,Farfetch'd,Normal,Flying,352,52,65,55,58,62,60,1,False
93 | 84,Doduo,Normal,Flying,310,35,85,45,35,35,75,1,False
94 | 85,Dodrio,Normal,Flying,460,60,110,70,60,60,100,1,False
95 | 86,Seel,Water,,325,65,45,55,45,70,45,1,False
96 | 87,Dewgong,Water,Ice,475,90,70,80,70,95,70,1,False
97 | 88,Grimer,Poison,,325,80,80,50,40,50,25,1,False
98 | 89,Muk,Poison,,500,105,105,75,65,100,50,1,False
99 | 90,Shellder,Water,,305,30,65,100,45,25,40,1,False
100 | 91,Cloyster,Water,Ice,525,50,95,180,85,45,70,1,False
101 | 92,Gastly,Ghost,Poison,310,30,35,30,100,35,80,1,False
102 | 93,Haunter,Ghost,Poison,405,45,50,45,115,55,95,1,False
103 | 94,Gengar,Ghost,Poison,500,60,65,60,130,75,110,1,False
104 | 94,GengarMega Gengar,Ghost,Poison,600,60,65,80,170,95,130,1,False
105 | 95,Onix,Rock,Ground,385,35,45,160,30,45,70,1,False
106 | 96,Drowzee,Psychic,,328,60,48,45,43,90,42,1,False
107 | 97,Hypno,Psychic,,483,85,73,70,73,115,67,1,False
108 | 98,Krabby,Water,,325,30,105,90,25,25,50,1,False
109 | 99,Kingler,Water,,475,55,130,115,50,50,75,1,False
110 | 100,Voltorb,Electric,,330,40,30,50,55,55,100,1,False
111 | 101,Electrode,Electric,,480,60,50,70,80,80,140,1,False
112 | 102,Exeggcute,Grass,Psychic,325,60,40,80,60,45,40,1,False
113 | 103,Exeggutor,Grass,Psychic,520,95,95,85,125,65,55,1,False
114 | 104,Cubone,Ground,,320,50,50,95,40,50,35,1,False
115 | 105,Marowak,Ground,,425,60,80,110,50,80,45,1,False
116 | 106,Hitmonlee,Fighting,,455,50,120,53,35,110,87,1,False
117 | 107,Hitmonchan,Fighting,,455,50,105,79,35,110,76,1,False
118 | 108,Lickitung,Normal,,385,90,55,75,60,75,30,1,False
119 | 109,Koffing,Poison,,340,40,65,95,60,45,35,1,False
120 | 110,Weezing,Poison,,490,65,90,120,85,70,60,1,False
121 | 111,Rhyhorn,Ground,Rock,345,80,85,95,30,30,25,1,False
122 | 112,Rhydon,Ground,Rock,485,105,130,120,45,45,40,1,False
123 | 113,Chansey,Normal,,450,250,5,5,35,105,50,1,False
124 | 114,Tangela,Grass,,435,65,55,115,100,40,60,1,False
125 | 115,Kangaskhan,Normal,,490,105,95,80,40,80,90,1,False
126 | 115,KangaskhanMega Kangaskhan,Normal,,590,105,125,100,60,100,100,1,False
127 | 116,Horsea,Water,,295,30,40,70,70,25,60,1,False
128 | 117,Seadra,Water,,440,55,65,95,95,45,85,1,False
129 | 118,Goldeen,Water,,320,45,67,60,35,50,63,1,False
130 | 119,Seaking,Water,,450,80,92,65,65,80,68,1,False
131 | 120,Staryu,Water,,340,30,45,55,70,55,85,1,False
132 | 121,Starmie,Water,Psychic,520,60,75,85,100,85,115,1,False
133 | 122,Mr. Mime,Psychic,Fairy,460,40,45,65,100,120,90,1,False
134 | 123,Scyther,Bug,Flying,500,70,110,80,55,80,105,1,False
135 | 124,Jynx,Ice,Psychic,455,65,50,35,115,95,95,1,False
136 | 125,Electabuzz,Electric,,490,65,83,57,95,85,105,1,False
137 | 126,Magmar,Fire,,495,65,95,57,100,85,93,1,False
138 | 127,Pinsir,Bug,,500,65,125,100,55,70,85,1,False
139 | 127,PinsirMega Pinsir,Bug,Flying,600,65,155,120,65,90,105,1,False
140 | 128,Tauros,Normal,,490,75,100,95,40,70,110,1,False
141 | 129,Magikarp,Water,,200,20,10,55,15,20,80,1,False
142 | 130,Gyarados,Water,Flying,540,95,125,79,60,100,81,1,False
143 | 130,GyaradosMega Gyarados,Water,Dark,640,95,155,109,70,130,81,1,False
144 | 131,Lapras,Water,Ice,535,130,85,80,85,95,60,1,False
145 | 132,Ditto,Normal,,288,48,48,48,48,48,48,1,False
146 | 133,Eevee,Normal,,325,55,55,50,45,65,55,1,False
147 | 134,Vaporeon,Water,,525,130,65,60,110,95,65,1,False
148 | 135,Jolteon,Electric,,525,65,65,60,110,95,130,1,False
149 | 136,Flareon,Fire,,525,65,130,60,95,110,65,1,False
150 | 137,Porygon,Normal,,395,65,60,70,85,75,40,1,False
151 | 138,Omanyte,Rock,Water,355,35,40,100,90,55,35,1,False
152 | 139,Omastar,Rock,Water,495,70,60,125,115,70,55,1,False
153 | 140,Kabuto,Rock,Water,355,30,80,90,55,45,55,1,False
154 | 141,Kabutops,Rock,Water,495,60,115,105,65,70,80,1,False
155 | 142,Aerodactyl,Rock,Flying,515,80,105,65,60,75,130,1,False
156 | 142,AerodactylMega Aerodactyl,Rock,Flying,615,80,135,85,70,95,150,1,False
157 | 143,Snorlax,Normal,,540,160,110,65,65,110,30,1,False
158 | 144,Articuno,Ice,Flying,580,90,85,100,95,125,85,1,True
159 | 145,Zapdos,Electric,Flying,580,90,90,85,125,90,100,1,True
160 | 146,Moltres,Fire,Flying,580,90,100,90,125,85,90,1,True
161 | 147,Dratini,Dragon,,300,41,64,45,50,50,50,1,False
162 | 148,Dragonair,Dragon,,420,61,84,65,70,70,70,1,False
163 | 149,Dragonite,Dragon,Flying,600,91,134,95,100,100,80,1,False
164 | 150,Mewtwo,Psychic,,680,106,110,90,154,90,130,1,True
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/write-batching-flight-calloptions.py:
--------------------------------------------------------------------------------
import random

import pandas as pd

from influxdb_client_3 import InfluxDBClient3, InfluxDBError, WriteOptions, write_client_options


class BatchingCallback(object):
    """Callback handlers invoked by the batching write API per batch outcome."""

    def success(self, conf, data: str):
        """Invoked after a batch is successfully written."""
        print(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        """Invoked when a batch fails with a non-retryable error."""
        print(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        """Invoked when a batch write fails but will be retried."""
        print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")


callback = BatchingCallback()

# Batch every 100 points or 10 seconds, retrying with jittered exponential backoff.
write_options = WriteOptions(batch_size=100,
                             flush_interval=10_000,
                             jitter_interval=2_000,
                             retry_interval=5_000,
                             max_retries=5,
                             max_retry_delay=30_000,
                             exponential_base=2)

wco = write_client_options(success_callback=callback.success,
                           error_callback=callback.error,
                           retry_callback=callback.retry,
                           write_options=write_options
                           )

# BUG FIX: the original example omitted `database`, so the write below would
# have had no target database.
client = InfluxDBClient3(
    token="",
    host="eu-central-1-1.aws.cloud2.influxdata.com",
    database="pokemon-codex",
    enable_gzip=True, write_client_options=wco)

# One millisecond-floored UTC timestamp shared by every generated row.
# (The original bound this to a variable misleadingly named `two_days_ago`
# even though its value was the current time.)
now = pd.Timestamp.now(tz='UTC').floor('ms')

# Lists of possible trainers
trainers = ["ash", "brock", "misty", "gary", "jessie", "james"]

# Read the CSV into a DataFrame
pokemon_df = pd.read_csv(
    "https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv")  # noqa: E501

# Rows accumulated here become the DataFrame written below.
data = []

# (trainer, pokemon) -> number of times that trainer has caught that Pokémon.
trainer_pokemon_counts = {}

# Number of entries we want to create
num_entries = 1000

# Generating random data
for _ in range(num_entries):
    trainer = random.choice(trainers)

    # Randomly select a row from pokemon_df
    random_pokemon = pokemon_df.sample().iloc[0]
    caught = random_pokemon['Name']

    # Bump the per-(trainer, Pokémon) catch counter.
    key = (trainer, caught)
    num = trainer_pokemon_counts.get(key, 0) + 1
    trainer_pokemon_counts[key] = num

    entry = {
        "trainer": trainer,
        "id": f"{random_pokemon['#']:04d}",
        "num": str(num),
        "caught": caught,
        "level": random.randint(5, 20),
        "attack": random_pokemon['Attack'],
        "defense": random_pokemon['Defense'],
        "hp": random_pokemon['HP'],
        "speed": random_pokemon['Speed'],
        "type1": random_pokemon['Type 1'],
        "type2": random_pokemon['Type 2'],
        "timestamp": now
    }
    data.append(entry)

# Convert the list of dictionaries to a DataFrame
caught_pokemon_df = pd.DataFrame(data).set_index('timestamp')

# BUG FIX: the original example configured batching callbacks and write
# options but never actually wrote the generated data, so the callbacks never
# fired and the query below had nothing new to return.
try:
    client.write(caught_pokemon_df, data_frame_measurement_name='caught',
                 data_frame_tag_columns=['trainer', 'id', 'num'])
except Exception as e:
    print(f"Error writing point: {e}")

# Query the points written within the last 30 minutes.
try:
    table = client.query(query='''SELECT * FROM caught WHERE time >= now() - 30m''', database='pokemon-codex',
                         timeout=90.0, language='sql', mode='pandas')
    print(table)
except Exception as e:
    print(f"Error querying points: {e}")
--------------------------------------------------------------------------------
/Examples/pokemon-trainer/write-batching.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import pandas as pd
4 |
5 | from influxdb_client_3 import InfluxDBClient3, InfluxDBError, WriteOptions, write_client_options
6 |
7 |
class BatchingCallback(object):
    """Callback handlers passed to the batching write API via write_client_options."""

    def success(self, conf, data: str):
        """Invoked after a batch is successfully written."""
        print(f"Written batch: {conf}, data: {data}")

    def error(self, conf, data: str, exception: InfluxDBError):
        """Invoked when a batch fails with a non-retryable error."""
        print(f"Cannot write batch: {conf}, data: {data} due: {exception}")

    def retry(self, conf, data: str, exception: InfluxDBError):
        """Invoked when a batch write fails but will be retried."""
        print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
18 |
19 |
# Wire the callbacks above into the batching write API.
callback = BatchingCallback()

# Batch every 100 points or 10 seconds, retrying with jittered exponential backoff.
write_options = WriteOptions(batch_size=100,
                             flush_interval=10_000,
                             jitter_interval=2_000,
                             retry_interval=5_000,
                             max_retries=5,
                             max_retry_delay=30_000,
                             exponential_base=2)

wco = write_client_options(success_callback=callback.success,
                           error_callback=callback.error,
                           retry_callback=callback.retry,
                           write_options=write_options
                           )

client = InfluxDBClient3(
    token="",
    host="eu-central-1-1.aws.cloud2.influxdata.com",
    database="pokemon-codex", enable_gzip=True, write_client_options=wco)

# One millisecond-floored UTC timestamp shared by every generated row.
now = pd.Timestamp.now(tz='UTC').floor('ms')

# Possible trainer names to draw from.
trainers = ["ash", "brock", "misty", "gary", "jessie", "james"]

# Load the pokedex into a DataFrame.
pokemon_df = pd.read_csv("https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv")  # noqa: E501

# Rows accumulated here become the DataFrame written below.
data = []

# (trainer, pokemon) -> how often that trainer has caught that Pokémon.
trainer_pokemon_counts = {}

# How many random rows to generate.
num_entries = 1000

for _ in range(num_entries):
    trainer = random.choice(trainers)

    # Pick a random pokedex row.
    random_pokemon = pokemon_df.sample().iloc[0]
    caught = random_pokemon['Name']

    # Bump the per-(trainer, Pokémon) catch counter.
    key = (trainer, caught)
    num = trainer_pokemon_counts.get(key, 0) + 1
    trainer_pokemon_counts[key] = num

    entry = {
        "trainer": trainer,
        "id": f"{0000 + random_pokemon['#']:04d}",
        "num": str(num),
        "caught": caught,
        "level": random.randint(5, 20),
        "attack": random_pokemon['Attack'],
        "defense": random_pokemon['Defense'],
        "hp": random_pokemon['HP'],
        "speed": random_pokemon['Speed'],
        "type1": random_pokemon['Type 1'],
        "type2": random_pokemon['Type 2'],
        "timestamp": now
    }
    data.append(entry)

# Index by timestamp so the client can use it as the time column.
caught_pokemon_df = pd.DataFrame(data).set_index('timestamp')

# Show what is about to be written.
print(caught_pokemon_df)

try:
    client.write(caught_pokemon_df, data_frame_measurement_name='caught',
                 data_frame_tag_columns=['trainer', 'id', 'num'])
except Exception as e:
    print(f"Error writing point: {e}")
102 |
--------------------------------------------------------------------------------
/Examples/query_async.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import random
3 | import time
4 |
5 | import pandas
6 |
7 | from influxdb_client_3 import InfluxDBClient3
8 |
9 | from config import Config
10 |
11 |
async def fibio(iterations, grit=0.5):
    """
    Example coroutine to run in parallel with query_async.

    Generates `iterations` new Fibonacci values (seeded with 1, 1), printing
    each new value and pausing between steps so other coroutines can run.

    :param iterations: how many new values to generate
    :param grit: delay in seconds between iterations
    :return: the full list of values, including both seeds
    """
    prev, curr = 1, 1
    sequence = [prev, curr]
    for _ in range(iterations):
        prev, curr = curr, prev + curr
        print(curr)
        sequence.append(curr)
        await asyncio.sleep(grit)
    return sequence
30 |
31 |
def write_data(client: InfluxDBClient3, measurement):
    """
    Synchronous write - only for preparing data.

    Builds ten line-protocol records at 10-second intervals ending roughly at
    the current time, then writes them in a single call.

    :param client: client used for the synchronous write
    :param measurement: measurement name to write into
    :return: None
    """
    device_ids = ['s3b1', 'dq41', 'sgw22']
    lp_template = f"{measurement},id=%s speed=%f,alt=%f,bearing=%f %d"
    data_size = 10
    interval = 10 * 1_000_000_000  # ten seconds in nanoseconds
    start = time.time_ns() - (interval * data_size)
    # One record per timestamp step, each with a random device and readings.
    lines = [
        lp_template % (device_ids[random.randint(0, len(device_ids) - 1)],
                       random.random() * 300,
                       random.random() * 2000,
                       random.random() * 360,
                       ts)
        for ts in range(start, start + interval * data_size, interval)
    ]

    client.write(lines)
53 |
54 |
async def query_data(client: InfluxDBClient3, measurement):
    """
    Query asynchronously - should not block other coroutines.

    Fetches the last five minutes of the given measurement, newest first,
    logging timestamps around the await so the overlap with other
    coroutines is visible.

    :param client: client whose query_async is awaited
    :param measurement: measurement name to query
    :return: the query result as a pandas DataFrame
    """
    sql = f"SELECT * FROM \"{measurement}\" WHERE time >= now() - interval '5 minutes' ORDER BY time DESC"
    print(f"query start: {pandas.Timestamp(time.time_ns())}")
    result = await client.query_async(sql)
    print(f"query returned: {pandas.Timestamp(time.time_ns())}")
    return result.to_pandas()
67 |
68 |
async def main():
    """Prepare sample data synchronously, then run the query and the fibio demo concurrently."""
    config = Config()
    client = InfluxDBClient3(
        host=config.host,
        token=config.token,
        database=config.database,
    )
    measurement = 'example_uav'
    write_data(client, measurement)

    # Both coroutines progress at once; gather returns results in argument order.
    fib_vals, frame = await asyncio.gather(fibio(10, 0.2), query_data(client, measurement))
    print(f"fibio sequence = {fib_vals}")
    print(f"data set =\n{frame}")
83 |
84 |
# Entry point: run the async demo on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
87 |
--------------------------------------------------------------------------------
/Examples/query_type.py:
--------------------------------------------------------------------------------
import influxdb_client_3 as InfluxDBClient3

# Demonstrates every `mode` accepted by InfluxDBClient3.query().
client = InfluxDBClient3.InfluxDBClient3(
    token="",
    host="eu-central-1-1.aws.cloud2.influxdata.com",
    database="factory")


# Chunk mode provides a FlightReader object that can be used to read chunks of data.
chunk_reader = client.query(
    query="SELECT * FROM machine_data WHERE time > now() - 2h",
    language="influxql", mode="chunk")

# read_chunk raises StopIteration once the stream is exhausted.
try:
    while True:
        chunk, _metadata = chunk_reader.read_chunk()
        print("batch:")
        print(chunk.to_pandas())
except StopIteration:
    print("No more chunks to read")


# Pandas mode provides a Pandas DataFrame object.
frame = client.query(
    query="SELECT * FROM machine_data WHERE time > now() - 2h",
    language="influxql", mode="pandas")

print("pandas:")
print(frame)

# All mode provides an Arrow Table object.
arrow_table = client.query(
    query="SELECT * FROM machine_data WHERE time > now() - 2h",
    language="influxql", mode="all")

print("table:")
print(arrow_table)

# Schema mode returns only the schema of the result set.
schema = client.query(
    query="SELECT * FROM machine_data WHERE time > now() - 2h",
    language="influxql", mode="schema")

print("schema:")
print(schema)

# Reader mode yields a regular RecordBatchReader that can be iterated directly.
record_reader = client.query(
    query="SELECT * FROM machine_data WHERE time > now() - 2h",
    language="influxql", mode="reader")

print("reader:")
for record_batch in record_reader:
    print(record_batch.to_pandas())
55 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 | # InfluxDB 3.0 Python Client
27 | ## Introduction
28 |
29 | `influxdb_client_3` is a Python module that provides a simple and convenient way to interact with InfluxDB 3.0. This module supports both writing data to InfluxDB and querying data using the Flight client, which allows you to execute SQL and InfluxQL queries on InfluxDB 3.0.
30 |
31 | We offer a ["Getting Started: InfluxDB 3.0 Python Client Library"](https://www.youtube.com/watch?v=tpdONTm1GC8) video that goes over how to use the library and goes over the examples.
32 | ## Dependencies
33 |
34 | - `pyarrow` (automatically installed)
35 | - `pandas` (optional)
36 |
37 |
38 | ## Installation
39 |
40 | You can install 'influxdb3-python' using `pip`:
41 |
42 | ```bash
43 | pip install influxdb3-python
44 | ```
45 |
46 | Note: This does not include Pandas support. If you would like to use key features such as `to_pandas()` and `write_file()` you will need to install `pandas` separately.
47 |
48 | *Note: Please make sure you are using Python 3.6 or above. For the best performance use Python 3.11+.*
49 |
50 | # Usage
51 | One of the easiest ways to get started is to checkout the ["Pokemon Trainer Cookbook"](Examples/pokemon-trainer/cookbook.ipynb). This scenario takes you through the basics of both the client library and Pyarrow.
52 |
53 | ## Importing the Module
54 | ```python
55 | from influxdb_client_3 import InfluxDBClient3, Point
56 | ```
57 |
58 | ## Initialization
59 | If you are using InfluxDB Cloud, then you should note that:
60 | 1. Use bucket name for the `database` argument.
61 |
62 | ```python
63 | client = InfluxDBClient3(token="your-token",
64 | host="your-host",
65 | database="your-database")
66 | ```
67 |
68 | ## Writing Data
69 | You can write data using the Point class, or supplying line protocol.
70 |
71 | ### Using Points
72 | ```python
73 | point = Point("measurement").tag("location", "london").field("temperature", 42)
74 | client.write(point)
75 | ```
76 | ### Using Line Protocol
77 | ```python
78 | point = "measurement fieldname=0"
79 | client.write(point)
80 | ```
81 |
82 | ### Write from file
83 | Users can import data from CSV, JSON, Feather, ORC, Parquet
84 | ```python
85 | import influxdb_client_3 as InfluxDBClient3
86 | import pandas as pd
87 | import numpy as np
88 | from influxdb_client_3 import write_client_options, WritePrecision, WriteOptions, InfluxDBError
89 |
90 |
91 | class BatchingCallback(object):
92 |
93 | def __init__(self):
94 | self.write_count = 0
95 |
96 | def success(self, conf, data: str):
97 | self.write_count += 1
98 | print(f"Written batch: {conf}, data: {data}")
99 |
100 | def error(self, conf, data: str, exception: InfluxDBError):
101 | print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
102 |
103 | def retry(self, conf, data: str, exception: InfluxDBError):
104 | print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
105 |
106 | callback = BatchingCallback()
107 |
108 | write_options = WriteOptions(batch_size=100,
109 | flush_interval=10_000,
110 | jitter_interval=2_000,
111 | retry_interval=5_000,
112 | max_retries=5,
113 | max_retry_delay=30_000,
114 | exponential_base=2)
115 |
116 | wco = write_client_options(success_callback=callback.success,
117 | error_callback=callback.error,
118 | retry_callback=callback.retry,
119 | write_options=write_options
120 | )
121 |
122 | with InfluxDBClient3.InfluxDBClient3(
123 | token="INSERT_TOKEN",
124 | host="eu-central-1-1.aws.cloud2.influxdata.com",
125 | database="python", write_client_options=wco) as client:
126 |
127 |
128 | client.write_file(
129 | file='./out.csv',
130 | timestamp_column='time', tag_columns=["provider", "machineID"])
131 |
132 | print(f'DONE writing from csv in {callback.write_count} batch(es)')
133 |
134 | ```
135 |
136 | ### Pandas DF
137 | ```python
138 | client._write_api.write(bucket="pokemon-codex", record=pd_df, data_frame_measurement_name='caught', data_frame_tag_columns=['trainer', 'id', 'num'], data_frame_timestamp_column='timestamp')
139 | ```
140 |
141 | ### Polars DF
142 | ```python
143 | client._write_api.write(bucket="pokemon-codex", record=pl_df, data_frame_measurement_name='caught', data_frame_tag_columns=['trainer', 'id', 'num'], data_frame_timestamp_column='timestamp')
144 | ```
145 |
146 | ## Querying
147 |
148 | ### Querying with SQL
149 | ```python
150 | query = "select * from measurement"
151 | reader = client.query(query=query, language="sql")
152 | table = reader.read_all()
153 | print(table.to_pandas().to_markdown())
154 | ```
155 |
156 | ### Querying with influxql
157 | ```python
158 | query = "select * from measurement"
159 | reader = client.query(query=query, language="influxql")
160 | table = reader.read_all()
161 | print(table.to_pandas().to_markdown())
162 | ```
163 |
164 | ## Windows Users
165 | Currently, Windows users require an extra installation when querying via Flight natively. This is because gRPC cannot locate Windows root certificates. To work around this, please follow these steps:
166 | Install `certifi`
167 | ```bash
168 | pip install certifi
169 | ```
170 | Next include certifi within the flight client options:
171 |
172 | ```python
173 | import certifi
174 |
175 | import influxdb_client_3 as InfluxDBClient3
176 | from influxdb_client_3 import flight_client_options
177 |
178 | fh = open(certifi.where(), "r")
179 | cert = fh.read()
180 | fh.close()
181 |
182 | client = InfluxDBClient3.InfluxDBClient3(
183 | token="",
184 | host="b0c7cce5-8dbc-428e-98c6-7f996fb96467.a.influxdb.io",
185 | database="flightdemo",
186 | flight_client_options=flight_client_options(
187 | tls_root_certs=cert))
188 |
189 | table = client.query(
190 | query="SELECT * FROM flight WHERE time > now() - 4h",
191 | language="influxql")
192 |
193 | print(table.to_pandas())
194 | ```
195 | You may also supply your own root certificate in this manner.
196 |
197 | # Contributing
198 |
199 | Tests are run using `pytest`.
200 |
201 | ```bash
202 | # Clone the repository
203 | git clone https://github.com/InfluxCommunity/influxdb3-python
204 | cd influxdb3-python
205 |
206 | # Create a virtual environment and activate it
207 | python3 -m venv .venv
208 | source .venv/bin/activate
209 |
210 | # Install the package and its dependencies
211 | pip install -e .[pandas,polars,dataframe,test]
212 |
213 | # Run the tests
214 | python -m pytest .
215 | ```
216 |
--------------------------------------------------------------------------------
/docs/readme.md:
--------------------------------------------------------------------------------
1 |
2 | # Arrow Flight Error Guide
3 |
4 | This guide provides details on errors returned by Arrow Flight, along with potential reasons for each error. If you encounter an error not listed here, please raise an issue or reach out for assistance.
5 |
6 | ## Table of Contents
7 |
8 | - [Arrow Flight Error Guide](#arrow-flight-error-guide)
9 | - [Table of Contents](#table-of-contents)
10 | - [Errors](#errors)
11 | - [Internal Error: Received RST\_STREAM](#internal-error-received-rst_stream)
12 | - [Internal Error: stream terminated by RST\_STREAM with NO\_ERROR](#internal-error-stream-terminated-by-rst_stream-with-no_error)
13 | - [ArrowInvalid: Flight returned invalid argument error with message: bucket "" not found](#arrowinvalid-flight-returned-invalid-argument-error-with-message-bucket--not-found)
14 | - [Contributions](#contributions)
15 |
16 | ## Errors
17 |
18 | ### Internal Error: Received RST_STREAM
19 |
20 | **Error Message:**
21 | `Flight returned internal error, with message: Received RST_STREAM with error code 2. gRPC client debug context: UNKNOWN:Error received from peer ipv4:34.196.233.7:443 {grpc_message:"Received RST_STREAM with error code 2"}`
22 |
23 | **Potential Reasons:**
24 | - The connection to the server was reset unexpectedly.
25 | - Network issues between the client and server.
26 | - Server might have closed the connection due to an internal error.
27 | - The client exceeded the server's maximum number of concurrent streams.
28 |
29 | ### Internal Error: stream terminated by RST_STREAM with NO_ERROR
30 |
31 | **Error Message:**
32 | `pyarrow._flight.FlightInternalError: Flight returned internal error, with message: stream terminated by RST_STREAM with error code: NO_ERROR. gRPC client debug context: UNKNOWN:Error received from peer ipv4:3.123.149.45:443 {created_time:"2023-07-26T14:12:44.992317+02:00", grpc_status:13, grpc_message:"stream terminated by RST_STREAM with error code: NO_ERROR"}. Client context: OK`
33 |
34 | **Potential Reasons:**
35 | - The server terminated the stream, but there wasn't any specific error associated with it.
36 | - Possible network disruption, even if it's temporary.
37 | - The server might have reached its maximum capacity or other internal limits.
38 | - Unspecified server-side issues that led to the termination of the stream.
39 |
40 | ### ArrowInvalid: Flight returned invalid argument error with message: bucket "" not found
41 |
42 | **Error Message:**
43 | `ArrowInvalid: Flight returned invalid argument error, with message: bucket "otel5" not found. gRPC client debug context: UNKNOWN:Error received from peer ipv4:3.123.149.45:443 {grpc_message:"bucket \"otel5\" not found", grpc_status:3, created_time:"2023-08-09T16:37:30.093946+01:00"}. Client context: IOError: Server never sent a data message. Detail: Internal`
44 |
45 | **Potential Reasons:**
46 | - The database has not been created within the current InfluxDB instance.
47 |
48 |
49 | ## Contributions
50 |
51 | We welcome contributions to this guide. If you've encountered an Arrow Flight error not listed here, please raise an issue or submit a pull request.
52 |
--------------------------------------------------------------------------------
/influxdb_client_3/exceptions/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from .exceptions import InfluxDB3ClientQueryError, InfluxDBError, InfluxDB3ClientError
4 |
--------------------------------------------------------------------------------
/influxdb_client_3/exceptions/exceptions.py:
--------------------------------------------------------------------------------
1 | """Exceptions utils for InfluxDB."""
2 |
3 | import logging
4 |
5 | from urllib3 import HTTPResponse
6 |
7 | logger = logging.getLogger('influxdb_client_3.exceptions')
8 |
9 |
class InfluxDB3ClientError(Exception):
    """
    Base exception for errors raised by the InfluxDB 3 client.

    Acts as the common ancestor for the more specific client-side
    failures (invalid queries, connection problems, API misuse), so a
    caller can catch every client error with a single except clause.
    """
20 |
21 |
# This error is for all query operations
class InfluxDB3ClientQueryError(InfluxDB3ClientError):
    """
    Represents an error that occurs when querying an InfluxDB client.

    This class is specifically designed to handle errors originating from
    client queries to an InfluxDB database. It extends the general
    `InfluxDB3ClientError`, allowing more precise identification and
    handling of query-related issues.

    :ivar message: Contains the specific error message describing the
        query error.
    :type message: str
    """

    def __init__(self, error_message, *args, **kwargs):
        """Initialize the query error and keep the message accessible on the instance."""
        super().__init__(error_message, *args, **kwargs)
        self.message = error_message
40 |
41 |
# This error is for all write operations
class InfluxDBError(InfluxDB3ClientError):
    """
    Raised when a server error occurs during a write operation.

    :ivar response: raw HTTP response, or ``None`` when the failure happened
        before any response was received
    :ivar message: human-readable message extracted from the response body,
        error headers, or HTTP status reason
    :ivar retry_after: value of the ``Retry-After`` header, if present
    """

    def __init__(self, response: HTTPResponse = None, message: str = None):
        """Initialize the InfluxDBError handler.

        :param response: HTTP response that triggered the error (optional)
        :param message: explicit message used when no response is available
        """
        if response is not None:
            self.response = response
            self.message = self._get_message(response)
            self.retry_after = response.getheader('Retry-After')
        else:
            self.response = None
            self.message = message or 'no response'
            self.retry_after = None
        super().__init__(self.message)

    def _get_message(self, response):
        """Extract the most specific error message from a response.

        Precedence: JSON body fields -> error headers -> HTTP status reason.
        """
        # Body
        if response.data:
            import json

            def get(d, key):
                # Recursively walk nested dicts following the key path.
                if not key or d is None:
                    return d
                return get(d.get(key[0]), key[1:])
            try:
                node = json.loads(response.data)
                for key in [['message'], ['data', 'error_message'], ['error']]:
                    value = get(node, key)
                    if value is not None:
                        return value
                return response.data
            except Exception as e:
                # Fixed: use the module logger instead of the root logger so
                # debug output honours this package's logging configuration.
                logger.debug(f"Cannot parse error response to JSON: {response.data}, {e}")
                return response.data

        # Header
        for header_key in ["X-Platform-Error-Code", "X-Influx-Error", "X-InfluxDb-Error"]:
            header_value = response.getheader(header_key)
            if header_value is not None:
                return header_value

        # Http Status
        return response.reason

    def getheaders(self):
        """Helper method to make response headers more accessible."""
        return self.response.getheaders()
90 |
--------------------------------------------------------------------------------
/influxdb_client_3/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InfluxCommunity/influxdb3-python/7b9e8027abc22eeb9ce7236f46b68aa31b706c2b/influxdb_client_3/py.typed
--------------------------------------------------------------------------------
/influxdb_client_3/query/__init__.py:
--------------------------------------------------------------------------------
1 | """Package for query module."""
2 |
--------------------------------------------------------------------------------
/influxdb_client_3/read_file.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pyarrow.csv as csv
3 | import pyarrow.feather as feather
4 | import pyarrow.parquet as parquet
5 |
6 | # Check if the OS is not Windows
7 | if os.name != 'nt':
8 | import pyarrow.orc as orc
9 |
10 |
class UploadFile:
    """
    Class for uploading and reading different types of files.
    """

    def __init__(self, file, file_parser_options=None):
        """
        Initialize an UploadFile instance.

        :param file: Path of the file to upload.
        :type file: str
        :param file_parser_options: Optional dict of keyword arguments
            forwarded to the underlying file-loading function.
            (Fixed: the old docstring documented a nonexistent ``kwargs`` param.)
        """
        self._file = file
        self._kwargs = file_parser_options if file_parser_options is not None else {}

    def load_file(self):
        """
        Load a file based on its extension.

        :return: The loaded file.
        :raises ValueError: If the file type is not supported.
        """
        # Dispatch on the file extension instead of a chain of if/elif tests;
        # order mirrors the original precedence.
        loaders = {
            ".feather": self.load_feather,
            ".parquet": self.load_parquet,
            ".csv": self.load_csv,
            ".json": self.load_json,
            ".orc": self.load_orc,
        }
        for suffix, loader in loaders.items():
            if self._file.endswith(suffix):
                return loader(self._file)
        raise ValueError("Unsupported file type")

    def load_feather(self, file):
        """
        Load a Feather file.

        :param file: The Feather file to load.
        :type file: str
        :return: The loaded Feather file.
        """
        return feather.read_table(file, **self._kwargs)

    def load_parquet(self, file):
        """
        Load a Parquet file.

        :param file: The Parquet file to load.
        :type file: str
        :return: The loaded Parquet file.
        """
        return parquet.read_table(file, **self._kwargs)

    def load_csv(self, file):
        """
        Load a CSV file.

        :param file: The CSV file to load.
        :type file: str
        :return: The loaded CSV file.
        """
        return csv.read_csv(file, **self._kwargs)

    def load_orc(self, file):
        """
        Load an ORC file.

        :param file: The ORC file to load.
        :type file: str
        :return: The loaded ORC file.
        :raises ValueError: If the OS is Windows (pyarrow.orc is not imported there).
        """
        if os.name == 'nt':
            raise ValueError("Unsupported file type for this OS")
        else:
            return orc.read_table(file, **self._kwargs)

    def load_json(self, file):
        """
        Load a JSON file.

        :param file: The JSON file to load.
        :type file: str
        :return: The loaded JSON file.
        :raises ImportError: If pandas is not installed.
        """
        try:
            import pandas as pd
        except ImportError:
            raise ImportError("Pandas is required for write_file(). Please install it using 'pip install pandas' or "
                              "'pip install influxdb3-python[pandas]'")

        return pd.read_json(file, **self._kwargs)
107 |
--------------------------------------------------------------------------------
/influxdb_client_3/version.py:
--------------------------------------------------------------------------------
"""Version of the Client that is used in User-Agent header."""

# Semantic version of the influxdb3-python client package.
VERSION = '0.14.0dev0'
# User-Agent header value sent with every request.
USER_AGENT = f'influxdb3-python/{VERSION}'
5 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/__init__.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | # flake8: noqa
4 |
5 | from __future__ import absolute_import
6 |
7 | from influxdb_client_3.write_client.client.write_api import WriteApi, WriteOptions
8 | from influxdb_client_3.write_client.client.influxdb_client import InfluxDBClient
9 | from influxdb_client_3.write_client.client.logging_handler import InfluxLoggingHandler
10 | from influxdb_client_3.write_client.client.write.point import Point
11 |
12 | from influxdb_client_3.write_client.service.write_service import WriteService
13 | from influxdb_client_3.write_client.service.signin_service import SigninService
14 | from influxdb_client_3.write_client.service.signout_service import SignoutService
15 |
16 |
17 | from influxdb_client_3.write_client.domain.write_precision import WritePrecision
18 |
19 | from influxdb_client_3.write_client.configuration import Configuration
20 | from influxdb_client_3.version import VERSION
21 | __version__ = VERSION
22 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/_sync/__init__.py:
--------------------------------------------------------------------------------
1 | """Synchronous REST APIs."""
2 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from __future__ import absolute_import
4 |
5 | # import apis into api package
6 | from influxdb_client_3.write_client.service.signin_service import SigninService
7 | from influxdb_client_3.write_client.service.signout_service import SignoutService
8 | from influxdb_client_3.write_client.service.write_service import WriteService
9 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/logging_handler.py:
--------------------------------------------------------------------------------
1 | """Use the influxdb_client with python native logging."""
2 | import logging
3 |
4 | from influxdb_client_3.write_client import InfluxDBClient
5 |
6 |
class InfluxLoggingHandler(logging.Handler):
    """
    Logging handler that dispatches log records to InfluxDB.

    No Formatter needs to be configured: the formatted record text is
    handed directly to the influx write api.
    """

    DEFAULT_LOG_RECORD_KEYS = list(logging.makeLogRecord({}).__dict__.keys()) + ['message']

    def __init__(self, *, url, token, org, bucket, client_args=None, write_api_args=None):
        """
        Set up the underlying client and write api.

        ``client_args`` and ``write_api_args`` may be dicts of extra kwargs
        forwarded to the InfluxDBClient constructor and the write_api call.
        """
        super().__init__()

        self.bucket = bucket

        if client_args is None:
            client_args = {}
        self.client = InfluxDBClient(url=url, token=token, org=org, **client_args)

        if write_api_args is None:
            write_api_args = {}
        self.write_api = self.client.write_api(**write_api_args)

    def __del__(self):
        """Release all resources when the handler is garbage-collected."""
        self.close()

    def close(self) -> None:
        """Close the write_api, the client and the handler itself."""
        self.write_api.close()
        self.client.close()
        super().close()

    def emit(self, record: logging.LogRecord) -> None:
        """Write a single log record through the influx WriteApi."""
        try:
            line = self.format(record)
            return self.write_api.write(record=line, **self._get_extra_values(record))
        except (KeyboardInterrupt, SystemExit):
            raise
        except (Exception,):
            self.handleError(record)

    def _get_extra_values(self, record: logging.LogRecord) -> dict:
        """
        Collect the target bucket plus every value injected via ``extra=``.

        Example: ``logging.debug(msg, extra={key: value, ...})``.
        """
        return {
            'bucket': self.bucket,
            **{key: value for key, value in record.__dict__.items()
               if key not in self.DEFAULT_LOG_RECORD_KEYS},
        }
65 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/util/__init__.py:
--------------------------------------------------------------------------------
1 | """Utils package."""
2 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/util/date_utils.py:
--------------------------------------------------------------------------------
1 | """Utils to get right Date parsing function."""
2 | import datetime
3 | import threading
4 | from datetime import timezone as tz
5 | from sys import version_info
6 |
7 | from dateutil import parser
8 |
# Module-level singleton returned by get_date_helper(); lazily initialized.
date_helper = None

# Guards the one-time initialization of ``date_helper``.
lock_ = threading.Lock()
12 |
13 |
class DateHelper:
    """
    DateHelper groups different implementations of date operations.

    To serialize query results into a custom timezone, use code such as:

    .. code-block:: python

        from influxdb_client.client.util import date_utils
        from influxdb_client.client.util.date_utils import DateHelper
        import dateutil.parser
        from dateutil import tz

        def parse_date(date_string: str):
            return dateutil.parser.parse(date_string).astimezone(tz.gettz('ETC/GMT+2'))

        date_utils.date_helper = DateHelper()
        date_utils.date_helper.parse_date = parse_date
    """

    def __init__(self, timezone: datetime.tzinfo = tz.utc) -> None:
        """
        Initialize defaults.

        :param timezone: timezone applied to serialized "datetime" values
            that carry no "tzinfo". Default value is "UTC".
        """
        self.timezone = timezone

    def parse_date(self, date_string: str):
        """
        Parse string into Date or Timestamp.

        :return: Returns a :class:`datetime.datetime` object or compliant implementation
            like :class:`class 'pandas._libs.tslibs.timestamps.Timestamp`
        """
        pass

    def to_nanoseconds(self, delta):
        """
        Get number of nanoseconds in timedelta.

        Solution comes from v1 client. Thx.
        https://github.com/influxdata/influxdb-python/pull/811
        """
        # days+seconds contribute whole seconds; microseconds add the rest.
        return ((delta.days * 86400 + delta.seconds) * 10 ** 9
                + delta.microseconds * 10 ** 3)

    def to_utc(self, value: datetime):
        """
        Convert datetime to UTC timezone.

        :param value: datetime (naive values are assumed to be in ``self.timezone``)
        :return: datetime in UTC
        """
        if value.tzinfo:
            return value.astimezone(tz.utc)
        # Attach the default timezone first, then convert.
        return self.to_utc(value.replace(tzinfo=self.timezone))
76 |
77 |
def get_date_helper() -> DateHelper:
    """
    Return the shared DateHelper with the fastest available parser.

    If there is a 'ciso8601' than use 'ciso8601.parse_datetime' else use 'dateutil.parse'.
    """
    global date_helper
    if date_helper is not None:
        return date_helper
    with lock_:
        # Double-checked locking: another thread may have initialized it
        # between our first check and acquiring the lock.
        if date_helper is None:
            helper = DateHelper()
            try:
                import ciso8601
                helper.parse_date = ciso8601.parse_datetime
            except ModuleNotFoundError:
                if (version_info.major, version_info.minor) >= (3, 11):
                    helper.parse_date = datetime.datetime.fromisoformat
                else:
                    helper.parse_date = parser.parse
            date_helper = helper
    return date_helper
101 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/util/date_utils_pandas.py:
--------------------------------------------------------------------------------
1 | """Pandas date utils."""
2 | from influxdb_client_3.write_client.client.util.date_utils import DateHelper
3 | from influxdb_client_3.write_client.extras import pd
4 |
5 |
class PandasDateTimeHelper(DateHelper):
    """DateHelper implementation backed by Pandas with nanosecond precision."""

    def parse_date(self, date_string: str):
        """Parse a date string into a `pandas.Timestamp`."""
        return pd.to_datetime(date_string)

    def to_nanoseconds(self, delta):
        """Get number of nanoseconds, including sub-microsecond precision when available."""
        extra_nanos = getattr(delta, 'nanoseconds', 0)
        return super().to_nanoseconds(delta) + extra_nanos
16 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/util/helpers.py:
--------------------------------------------------------------------------------
1 | """Functions to share utility across client classes."""
2 | from influxdb_client_3.write_client.rest import ApiException
3 |
4 |
def _is_id(value):
    """
    Check if the value is a valid InfluxDB ID.

    :param value: to check
    :return: True if provided parameter is a valid InfluxDB ID.
    """
    # A valid ID is exactly 16 hexadecimal characters.
    if not value or len(value) != 16:
        return False
    try:
        int(value, 16)
    except ValueError:
        return False
    return True
19 |
20 |
def get_org_query_param(org, client, required_id=False):
    """
    Get required type of Org query parameter.

    :param str, Organization org: value provided as a parameter into API (optional)
    :param InfluxDBClient client: with default value for Org parameter
    :param bool required_id: true if the query param has to be a ID
    :return: request type of org query parameter or None
    """
    _org = client.org if org is None else org
    # Unwrap an Organization domain object into its ID.
    if 'Organization' in type(_org).__name__:
        _org = _org.id
    if required_id and _org and not _is_id(_org):
        try:
            organizations = client.organizations_api().find_organizations(org=_org)
            if len(organizations) < 1:
                # Fixed import path: the exceptions module lives in
                # influxdb_client_3.exceptions, not write_client.client.exceptions.
                from influxdb_client_3.exceptions import InfluxDBError
                message = f"The client cannot find organization with name: '{_org}' " \
                          "to determine their ID. Are you using token with sufficient permission?"
                raise InfluxDBError(response=None, message=message)
            return organizations[0].id
        except ApiException as e:
            if e.status == 404:
                from influxdb_client_3.exceptions import InfluxDBError
                message = f"The client cannot find organization with name: '{_org}' " \
                          "to determine their ID."
                raise InfluxDBError(response=None, message=message)
            raise e

    return _org
51 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/util/multiprocessing_helper.py:
--------------------------------------------------------------------------------
1 | """
2 | Helpers classes to make easier use the client in multiprocessing environment.
3 |
4 | For more information how the multiprocessing works see Python's
5 | `reference docs `_.
6 | """
7 | import logging
8 | import multiprocessing
9 |
10 | from influxdb_client_3.write_client import InfluxDBClient, WriteOptions
11 | from influxdb_client_3.exceptions import InfluxDBError
12 |
13 | logger = logging.getLogger('influxdb_client.client.util.multiprocessing_helper')
14 |
15 |
def _success_callback(conf: (str, str, str), data: str):
    """Log a successfully written batch (default success callback)."""
    logger.debug(f"Written batch: {conf}, data: {data}")
19 |
20 |
def _error_callback(conf: (str, str, str), data: str, exception: InfluxDBError):
    """Log a batch that could not be written (default error callback)."""
    logger.debug(f"Cannot write batch: {conf}, data: {data} due: {exception}")
24 |
25 |
def _retry_callback(conf: (str, str, str), data: str, exception: InfluxDBError):
    """Log a retryable write error (default retry callback)."""
    logger.debug(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
29 |
30 |
class _PoisonPill:
    """Sentinel placed on the queue to tell the writer process to terminate."""

    pass
35 |
36 |
class MultiprocessingWriter(multiprocessing.Process):
    """
    The Helper class to write data into InfluxDB in independent OS process.

    Example:
        .. code-block:: python

            from influxdb_client import WriteOptions
            from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter


            def main():
                writer = MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
                                               write_options=WriteOptions(batch_size=100))
                writer.start()

                for x in range(1, 1000):
                    writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")

                writer.__del__()


            if __name__ == '__main__':
                main()


    How to use with context_manager:
        .. code-block:: python

            from influxdb_client import WriteOptions
            from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter


            def main():
                with MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
                                           write_options=WriteOptions(batch_size=100)) as writer:
                    for x in range(1, 1000):
                        writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")


            if __name__ == '__main__':
                main()


    How to handle batch events:
        .. code-block:: python

            from influxdb_client import WriteOptions
            from influxdb_client.client.exceptions import InfluxDBError
            from influxdb_client.client.util.multiprocessing_helper import MultiprocessingWriter


            class BatchingCallback(object):

                def success(self, conf: (str, str, str), data: str):
                    print(f"Written batch: {conf}, data: {data}")

                def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
                    print(f"Cannot write batch: {conf}, data: {data} due: {exception}")

                def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
                    print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")


            def main():
                callback = BatchingCallback()
                with MultiprocessingWriter(url="http://localhost:8086", token="my-token", org="my-org",
                                           success_callback=callback.success,
                                           error_callback=callback.error,
                                           retry_callback=callback.retry) as writer:

                    for x in range(1, 1000):
                        writer.write(bucket="my-bucket", record=f"mem,tag=a value={x}i {x}")


            if __name__ == '__main__':
                main()


    """

    # Lifecycle flags: set by start() and __del__() respectively.
    __started__ = False
    __disposed__ = False

    def __init__(self, **kwargs) -> None:
        """
        Initialize defaults.

        For more information how to initialize the writer see the examples above.

        :param kwargs: arguments are passed into ``__init__`` function of ``InfluxDBClient`` and ``write_api``.
        """
        multiprocessing.Process.__init__(self)
        self.kwargs = kwargs
        # Client and write_api are created lazily inside run(), i.e. in the
        # child process, not in the parent.
        self.client = None
        self.write_api = None
        # Manager-backed queue so it can be shared across processes.
        self.queue_ = multiprocessing.Manager().Queue()

    def write(self, **kwargs) -> None:
        """
        Append time-series data into underlying queue.

        For more information how to pass arguments see the examples above.

        :param kwargs: arguments are passed into ``write`` function of ``WriteApi``
        :return: None
        """
        assert self.__disposed__ is False, 'Cannot write data: the writer is closed.'
        assert self.__started__ is True, 'Cannot write data: the writer is not started.'
        self.queue_.put(kwargs)

    def run(self):
        """Initialize ``InfluxDBClient`` and waits for data to writes into InfluxDB."""
        # Initialize Client and Write API
        self.client = InfluxDBClient(**self.kwargs)
        self.write_api = self.client.write_api(write_options=self.kwargs.get('write_options', WriteOptions()),
                                               success_callback=self.kwargs.get('success_callback', _success_callback),
                                               error_callback=self.kwargs.get('error_callback', _error_callback),
                                               retry_callback=self.kwargs.get('retry_callback', _retry_callback))
        # Infinite loop - until poison pill
        while True:
            next_record = self.queue_.get()
            if type(next_record) is _PoisonPill:
                # Poison pill means break the loop
                self.terminate()
                self.queue_.task_done()
                break
            self.write_api.write(**next_record)
            self.queue_.task_done()

    def start(self) -> None:
        """Start independent process for writing data into InfluxDB."""
        super().start()
        self.__started__ = True

    def terminate(self) -> None:
        """
        Cleanup resources in independent process.

        This function **cannot be used** to terminate the ``MultiprocessingWriter``.
        If you want to finish your writes please call: ``__del__``.
        """
        if self.write_api:
            logger.info("flushing data...")
            # __del__ on the write api flushes any buffered batches.
            self.write_api.__del__()
            self.write_api = None
        if self.client:
            self.client.__del__()
            self.client = None
        logger.info("closed")

    def __enter__(self):
        """Enter the runtime context related to this object."""
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the runtime context related to this object."""
        self.__del__()

    def __del__(self):
        """Dispose the client and write_api."""
        if self.__started__:
            # Send the poison pill, wait for the queue to drain, then join
            # the child process before marking the writer disposed.
            self.queue_.put(_PoisonPill())
            self.queue_.join()
            self.join()
            self.queue_ = None
        self.__started__ = False
        self.__disposed__ = True
206 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/warnings.py:
--------------------------------------------------------------------------------
1 | """The warnings message definition."""
2 | import warnings
3 |
4 |
class MissingPivotFunction(UserWarning):
    """User warning about missing pivot() function."""

    @staticmethod
    def print_warning(query: str):
        """Print warning about missing pivot() function and how to deal with that.

        :param query: the Flux query to inspect; no warning is emitted when it
            already mentions pivot() or schema.fieldsAsCols().
        """
        if 'fieldsAsCols' in query or 'pivot' in query:
            return

        # Grammar fix in the user-facing text: "doesn't contains" -> "doesn't contain".
        message = f"""The query doesn't contain the pivot() function.

The result will not be shaped to optimal processing by pandas.DataFrame. Use the pivot() function by:

{query} |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")

You can disable this warning by:
import warnings
from influxdb_client.client.warnings import MissingPivotFunction

warnings.simplefilter("ignore", MissingPivotFunction)

For more info see:
- https://docs.influxdata.com/resources/videos/pivots-in-flux/
- https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
- https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
"""
        warnings.warn(message, MissingPivotFunction)
32 |
33 |
class CloudOnlyWarning(UserWarning):
    """Warns that a feature is available only on InfluxDB Cloud."""

    @staticmethod
    def print_warning(api_name: str, doc_url: str):
        """Emit a warning that the given API exists only on InfluxDB Cloud.

        :param api_name: name of the Cloud-only API
        :param doc_url: link to the API's documentation
        """
        warnings.warn(f"""The '{api_name}' is available only on the InfluxDB Cloud.

For more info see:
- {doc_url}
- https://docs.influxdata.com/influxdb/cloud/

You can disable this warning by:
import warnings
from influxdb_client.client.warnings import CloudOnlyWarning

warnings.simplefilter("ignore", CloudOnlyWarning)
""", category=CloudOnlyWarning)
53 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/write/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from __future__ import absolute_import
4 |
5 | # import apis into api package
6 | from influxdb_client_3.write_client.service.signin_service import SigninService
7 | from influxdb_client_3.write_client.service.signout_service import SignoutService
8 | from influxdb_client_3.write_client.service.write_service import WriteService
9 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/write/polars_dataframe_serializer.py:
--------------------------------------------------------------------------------
1 | """
2 | Functions for serialize Polars DataFrame.
3 |
4 | Much of the code here is inspired by that in the aioinflux packet found here: https://github.com/gusutabopb/aioinflux
5 | """
6 |
7 | import logging
8 | import math
9 |
10 | from influxdb_client_3.write_client.client.write.point import _ESCAPE_KEY, _ESCAPE_STRING, DEFAULT_WRITE_PRECISION
11 |
12 | logger = logging.getLogger('influxdb_client.client.write.polars_dataframe_serializer')
13 |
14 |
15 | class PolarsDataframeSerializer:
16 | """Serialize DataFrame into LineProtocols."""
17 |
    def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION, chunk_size: int = None,
                 **kwargs) -> None:
        """
        Init serializer.

        :param data_frame: Polars DataFrame to serialize
        :param point_settings: Default Tags
        :param precision: The precision for the unix timestamps within the body line-protocol.
        :param chunk_size: The size of chunk for serializing into chunks.
        :key data_frame_measurement_name: name of measurement for writing Polars DataFrame
        :key data_frame_tag_columns: list of DataFrame columns which are tags, rest columns will be fields
        :key data_frame_timestamp_column: name of DataFrame column which contains a timestamp.
        :key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column
        :raises ValueError: if no valid timestamp column is configured for the DataFrame
        """

        self.data_frame = data_frame
        self.point_settings = point_settings
        self.precision = precision
        self.chunk_size = chunk_size
        self.measurement_name = kwargs.get("data_frame_measurement_name", "measurement")
        self.tag_columns = kwargs.get("data_frame_tag_columns", [])
        self.timestamp_column = kwargs.get("data_frame_timestamp_column", None)
        self.timestamp_timezone = kwargs.get("data_frame_timestamp_timezone", None)

        # Map column name -> positional index for fast row access during serialization.
        self.column_indices = {name: index for index, name in enumerate(data_frame.columns)}

        # A timestamp column is mandatory and must exist in the DataFrame.
        if self.timestamp_column is None or self.timestamp_column not in self.column_indices:
            raise ValueError(
                f"Timestamp column {self.timestamp_column} not found in DataFrame. Please define a valid timestamp "
                f"column.")

        #
        # prepare chunks
        #
        if chunk_size is not None:
            self.number_of_chunks = int(math.ceil(len(data_frame) / float(chunk_size)))
            self.chunk_size = chunk_size
        else:
            # None signals "serialize the whole DataFrame in one pass".
            self.number_of_chunks = None
57 |
58 | def escape_key(self, value):
59 | return str(value).translate(_ESCAPE_KEY)
60 |
61 | def escape_value(self, value):
62 | return str(value).translate(_ESCAPE_STRING)
63 |
64 | def to_line_protocol(self, row):
65 | # Filter out None or empty values for tags
66 | tags = ""
67 |
68 | tags = ",".join(
69 | f'{self.escape_key(col)}={self.escape_key(row[self.column_indices[col]])}'
70 | for col in self.tag_columns
71 | if row[self.column_indices[col]] is not None and row[self.column_indices[col]] != ""
72 | )
73 |
74 | if self.point_settings.defaultTags:
75 | default_tags = ",".join(
76 | f'{self.escape_key(key)}={self.escape_key(value)}'
77 | for key, value in self.point_settings.defaultTags.items()
78 | )
79 | # Ensure there's a comma between existing tags and default tags if both are present
80 | if tags and default_tags:
81 | tags += ","
82 | tags += default_tags
83 |
84 | # add escape symbols for special characters to tags
85 |
86 | fields = ",".join(
87 | f"{col}=\"{self.escape_value(row[self.column_indices[col]])}\"" if isinstance(row[self.column_indices[col]],
88 | str)
89 | else f"{col}={str(row[self.column_indices[col]]).lower()}" if isinstance(row[self.column_indices[col]],
90 | bool) # Check for bool first
91 | else f"{col}={row[self.column_indices[col]]}i" if isinstance(row[self.column_indices[col]], int)
92 | else f"{col}={row[self.column_indices[col]]}"
93 | for col in self.column_indices
94 | if col not in self.tag_columns + [self.timestamp_column] and
95 | row[self.column_indices[col]] is not None and row[self.column_indices[col]] != ""
96 | )
97 |
98 | # Access the Unix timestamp
99 | timestamp = row[self.column_indices[self.timestamp_column]]
100 | if tags != "":
101 | line_protocol = f"{self.measurement_name},{tags} {fields} {timestamp}"
102 | else:
103 | line_protocol = f"{self.measurement_name} {fields} {timestamp}"
104 |
105 | return line_protocol
106 |
107 | def serialize(self, chunk_idx: int = None):
108 | import polars as pl
109 |
110 | df = self.data_frame
111 |
112 | # Check if the timestamp column is already an integer
113 | if df[self.timestamp_column].dtype in [pl.Int32, pl.Int64]:
114 | # The timestamp column is already an integer, assuming it's in Unix format
115 | pass
116 | else:
117 | # Convert timestamp to Unix timestamp based on specified precision
118 | if self.precision in [None, 'ns']:
119 | df = df.with_columns(
120 | pl.col(self.timestamp_column).dt.epoch(time_unit="ns").alias(self.timestamp_column))
121 | elif self.precision == 'us':
122 | df = df.with_columns(
123 | pl.col(self.timestamp_column).dt.epoch(time_unit="us").alias(self.timestamp_column))
124 | elif self.precision == 'ms':
125 | df = df.with_columns(
126 | pl.col(self.timestamp_column).dt.epoch(time_unit="ms").alias(self.timestamp_column))
127 | elif self.precision == 's':
128 | df = df.with_columns(pl.col(self.timestamp_column).dt.epoch(time_unit="s").alias(self.timestamp_column))
129 | else:
130 | raise ValueError(f"Unsupported precision: {self.precision}")
131 |
132 | if chunk_idx is None:
133 | chunk = df
134 | else:
135 | logger.debug("Serialize chunk %s/%s ...", chunk_idx + 1, self.number_of_chunks)
136 | chunk = df[chunk_idx * self.chunk_size:(chunk_idx + 1) * self.chunk_size]
137 |
138 | # Apply the UDF to each row
139 | line_protocol_expr = chunk.map_rows(self.to_line_protocol, return_dtype=pl.Object)
140 |
141 | lp = line_protocol_expr['map'].to_list()
142 |
143 | return lp
144 |
145 |
def polars_data_frame_to_list_of_points(data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION, **kwargs):
    """
    Serialize DataFrame into LineProtocols.

    :param data_frame: Polars DataFrame to serialize
    :param point_settings: Default Tags
    :param precision: The precision for the unix timestamps within the body line-protocol.
    :key data_frame_measurement_name: name of measurement for writing Polars DataFrame
    :key data_frame_tag_columns: list of DataFrame columns which are tags, rest columns will be fields
    :key data_frame_timestamp_column: name of DataFrame column which contains a timestamp. The column can be defined as a :class:`~str` value
                                      formatted as `2018-10-26`, `2018-10-26 12:00`, `2018-10-26 12:00:00-05:00`
                                      or other datetime formats and types supported by Polars - ``DataFrame``
    :key data_frame_timestamp_timezone: name of the timezone which is used for timestamp column - ``DataFrame``
    """  # noqa: E501
    return PolarsDataframeSerializer(data_frame, point_settings, precision, **kwargs).serialize()
161 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/client/write/retry.py:
--------------------------------------------------------------------------------
1 | """Implementation for Retry strategy during HTTP requests."""
2 |
3 | import logging
4 | from datetime import datetime, timedelta
5 | from itertools import takewhile
6 | from random import random
7 | from typing import Callable
8 |
9 | from urllib3 import Retry
10 | from urllib3.exceptions import MaxRetryError, ResponseError
11 |
12 | from influxdb_client_3.exceptions import InfluxDBError
13 |
14 | logger = logging.getLogger('influxdb_client.client.write.retry')
15 |
16 |
class WritesRetry(Retry):
    """
    Writes retry configuration.

    The next delay is computed as random value between range
    `retry_interval * exponential_base^(attempts-1)` and `retry_interval * exponential_base^(attempts)`

    Example:
        for retry_interval=5, exponential_base=2, max_retry_delay=125, total=5
        retry delays are random distributed values within the ranges of
        [5-10, 10-20, 20-40, 40-80, 80-125]
    """

    def __init__(self, jitter_interval=0, max_retry_delay=125, exponential_base=2, max_retry_time=180, total=5,
                 retry_interval=5, retry_callback: Callable[[Exception], int] = None, **kw):
        """
        Initialize defaults.

        :param int jitter_interval: random milliseconds when retrying writes
        :param num max_retry_delay: maximum delay when retrying write in seconds
        :param int max_retry_time: maximum total retry timeout in seconds,
                                   an attempt after this timeout throws MaxRetryError
        :param int total: maximum number of retries
        :param num retry_interval: initial first retry delay range in seconds
        :param int exponential_base: base for the exponential retry delay
        :param Callable[[Exception], int] retry_callback: the callable ``callback`` to run after a retryable
                                                          error occurred.
                                                          The callable must accept one argument:
                                                          - `Exception`: a retryable error
        """
        super().__init__(**kw)
        self.jitter_interval = jitter_interval
        self.total = total
        self.retry_interval = retry_interval
        self.max_retry_delay = max_retry_delay
        self.max_retry_time = max_retry_time
        self.exponential_base = exponential_base
        # Hard deadline for all retries of this request; enforced in increment().
        self.retry_timeout = datetime.now() + timedelta(seconds=max_retry_time)
        self.retry_callback = retry_callback

    def new(self, **kw):
        """Initialize defaults.

        urllib3's ``Retry.new()`` only forwards the attributes it knows about,
        so the custom configuration is re-injected here unless the caller overrides it.
        """
        if 'jitter_interval' not in kw:
            kw['jitter_interval'] = self.jitter_interval
        if 'retry_interval' not in kw:
            kw['retry_interval'] = self.retry_interval
        if 'max_retry_delay' not in kw:
            kw['max_retry_delay'] = self.max_retry_delay
        if 'max_retry_time' not in kw:
            kw['max_retry_time'] = self.max_retry_time
        if 'exponential_base' not in kw:
            kw['exponential_base'] = self.exponential_base
        if 'retry_callback' not in kw:
            kw['retry_callback'] = self.retry_callback

        new = super().new(**kw)
        # Keep the original deadline - a copied Retry must not extend the retry window.
        new.retry_timeout = self.retry_timeout
        return new

    def is_retry(self, method, status_code, has_retry_after=False):
        """is_retry doesn't require retry_after header. If there is not Retry-After we will use backoff."""
        if not self._is_method_retryable(method):
            return False

        # Retry any status code of 429 or above, as long as some retry budget (total) remains.
        return self.total and (status_code >= 429)

    def get_backoff_time(self):
        """Variant of exponential backoff with initial and max delay and a random jitter delay."""
        # We want to consider only the last consecutive errors sequence (Ignore redirects).
        consecutive_errors_len = len(
            list(
                takewhile(lambda x: x.redirect_location is None, reversed(self.history))
            )
        )
        # First fail doesn't increase backoff
        consecutive_errors_len -= 1
        if consecutive_errors_len < 0:
            return 0

        range_start = self.retry_interval
        range_stop = self.retry_interval * self.exponential_base

        # Widen the [range_start, range_stop) window exponentially per consecutive error,
        # capped at max_retry_delay.
        i = 1
        while i <= consecutive_errors_len:
            i += 1
            range_start = range_stop
            range_stop = range_stop * self.exponential_base
            if range_stop > self.max_retry_delay:
                break

        if range_stop > self.max_retry_delay:
            range_stop = self.max_retry_delay

        # Uniformly random delay within the computed window.
        return range_start + (range_stop - range_start) * self._random()

    def get_retry_after(self, response):
        """Get the value of Retry-After header and append random jitter delay."""
        retry_after = super().get_retry_after(response)
        if retry_after:
            retry_after += self._jitter_delay()
        return retry_after

    def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
        """Return a new Retry object with incremented retry counters.

        :raises MaxRetryError: when the overall ``max_retry_time`` deadline has passed.
        """
        if self.retry_timeout < datetime.now():
            raise MaxRetryError(_pool, url, error or ResponseError("max_retry_time exceeded"))

        new_retry = super().increment(method, url, response, error, _pool, _stacktrace)

        # Build a human-readable description of what triggered the retry.
        if response is not None:
            parsed_error = InfluxDBError(response=response)
        elif error is not None:
            parsed_error = error
        else:
            parsed_error = f"Failed request to: {url}"

        message = f"The retriable error occurred during request. Reason: '{parsed_error}'."
        if isinstance(parsed_error, InfluxDBError):
            message += f" Retry in {parsed_error.retry_after}s."

        # Give the caller a chance to react to (e.g. collect/report) the error.
        if self.retry_callback:
            self.retry_callback(parsed_error)

        logger.warning(message)

        return new_retry

    def _jitter_delay(self):
        """Return a random delay scaled by ``jitter_interval``."""
        return self.jitter_interval * random()

    def _random(self):
        """Return uniform random in [0, 1); separated out for testability."""
        return random()
149 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/configuration.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from __future__ import absolute_import
4 |
5 | import copy
6 | import logging
7 | import multiprocessing
8 | import sys
9 |
10 | import urllib3
11 |
12 |
class TypeWithDefault(type):
    """Metaclass that hands out shallow copies of a shared default instance.

    Calling the class never returns the shared template itself: the first call
    lazily creates it, and every call returns a ``copy.copy`` of it, so
    mutating one returned object does not affect later calls.  The template
    can be replaced wholesale via :meth:`set_default`.
    """

    def __init__(cls, name, bases, dct):
        """Initialize the class object and reset its shared default."""
        super().__init__(name, bases, dct)
        cls._default = None

    def __call__(cls):
        """Return a shallow copy of the shared default instance, creating it on first use."""
        if cls._default is None:
            cls._default = super().__call__()
        return copy.copy(cls._default)

    def set_default(cls, default):
        """Replace the shared default with a shallow copy of *default*."""
        cls._default = copy.copy(default)
29 |
30 |
class Configuration(object, metaclass=TypeWithDefault):
    """Client configuration: endpoint, auth, logging, TLS, proxy and compression settings.

    Because of the ``TypeWithDefault`` metaclass, ``Configuration()`` returns a
    copy of a shared default instance (see ``TypeWithDefault.set_default``).
    """

    def __init__(self):
        """Initialize configuration."""
        # Default Base url
        self.host = "http://localhost/api/v2"
        # Temp file folder for downloading files
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""

        # Logging Settings
        self.loggers = {}
        # Log format
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location
        self.logger_file = None
        # Debug switch
        self.debug = False

        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.cert_key_file = None
        # client key file password
        self.cert_key_password = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None

        # Set this to specify a custom ssl context to inject this context inside the urllib3 connection pool.
        self.ssl_context = None

        # urllib3 connection pool's maximum number of connections saved
        # per pool. urllib3 uses 1 connection as default value, but this is
        # not the best value when you are making a lot of possibly parallel
        # requests to the same host, which is often the case here.
        # cpu_count * 5 is used as default value to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        # Timeout setting for a request. If one number provided, it will be total request timeout.
        # It can also be a pair (tuple) of (connection, read) timeouts.
        self.timeout = None

        # Set to True/False to enable basic authentication when using proxied InfluxDB 1.8.x with no auth-enabled
        self.auth_basic = False

        # Proxy URL
        self.proxy = None
        # A dictionary containing headers that will be sent to the proxy
        self.proxy_headers = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''

        # Compression settings
        self.enable_gzip = False
        self.gzip_threshold = None

    @property
    def logger_file(self):
        """Logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """Logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in self.loggers.items():
                logger.addHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status.

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status.

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for name, logger in self.loggers.items():
                logger.setLevel(logging.DEBUG)
                if name == 'influxdb_client.client.http':
                    # makes sure to do not duplicate stdout handler
                    if not any(map(lambda h: isinstance(h, logging.StreamHandler) and h.stream == sys.stdout,
                                   logger.handlers)):
                        logger.addHandler(logging.StreamHandler(sys.stdout))
            # we use 'influxdb_client.client.http' logger instead of this
            # httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in self.loggers.items():
                logger.setLevel(logging.WARNING)
            # we use 'influxdb_client.client.http' logger instead of this
            # httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """Logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """Logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """Get API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication, or None when no key is configured.
        """
        if (self.api_key.get(identifier) and
                self.api_key_prefix.get(identifier)):
            return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]  # noqa: E501
        elif self.api_key.get(identifier):
            return self.api_key[identifier]

    def get_basic_auth_token(self):
        """Get HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')

    def auth_settings(self):
        """Get Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        return {
            'BasicAuthentication':
                {
                    'type': 'basic',
                    'in': 'header',
                    'key': 'Authorization',
                    'value': self.get_basic_auth_token()
                },
            'TokenAuthentication':
                {
                    'type': 'api_key',
                    'in': 'header',
                    'key': 'Authorization',
                    'value': self.get_api_key_with_prefix('Authorization')
                },

        }

    def to_debug_report(self):
        """Get the essential information for debugging.

        :return: The report for debugging.
        """
        # NOTE(review): 'write_client' looks like a stale top-level import path;
        # the package ships as influxdb_client_3.write_client - confirm VERSION
        # is importable from here.
        from write_client import VERSION
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 2.0.0\n"\
               "SDK Package Version: {client_version}".\
               format(env=sys.platform, pyversion=sys.version, client_version=VERSION)

    def update_request_header_params(self, path: str, params: dict, should_gzip: bool = False):
        """Update header params based on custom settings.

        :param path: Resource path.
        :param params: Header parameters dict to be updated.
        :param should_gzip: Describes if request body should be gzip compressed.
        """
        pass

    def update_request_body(self, path: str, body, should_gzip: bool = False):
        """Update http body based on custom settings.

        :param path: Resource path.
        :param body: Request body to be updated.
        :param should_gzip: Describes if request body should be gzip compressed.
        :return: Updated body
        """
        return body
270 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/domain/__init__.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | # flake8: noqa
4 |
5 | from __future__ import absolute_import
6 |
7 | # import models into model package
8 | from influxdb_client_3.write_client.domain.write_precision import WritePrecision
9 |
10 |
11 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/domain/write_precision.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | import pprint
4 | import re # noqa: F401
5 |
6 |
class WritePrecision(object):

    """
    allowed enum values
    """
    MS = "ms"
    S = "s"
    US = "us"
    NS = "ns"

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):  # noqa: E501,D401,D403
        """WritePrecision - a model defined in OpenAPI."""  # noqa: E501
        # Fix: this assignment was previously fused onto the docstring's trailing
        # comment, so instances never actually had a `discriminator` attribute.
        self.discriminator = None

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}

        # Walk the declared OpenAPI attributes (empty for this enum-only model)
        # and recursively convert nested models/lists/dicts.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, WritePrecision):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
75 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/domain/write_precision_converter.py:
--------------------------------------------------------------------------------
1 | from influxdb_client_3.write_client.domain import WritePrecision
2 |
3 |
class WritePrecisionConverter(object):
    """Translate ``WritePrecision`` values into the wire formats of the v2/v3 APIs."""

    @staticmethod
    def to_v2_api_string(precision):
        """
        Converts WritePrecision to its string representation for V2 API.
        """
        # The v2 API accepts the enum value itself ("ns", "us", "ms", "s").
        if precision in (WritePrecision.NS, WritePrecision.US, WritePrecision.MS, WritePrecision.S):
            return precision
        raise ValueError("Unsupported precision '%s'" % precision)

    @staticmethod
    def to_v3_api_string(precision):
        """
        Converts WritePrecision to its string representation for V3 API.
        """
        # The v3 API expects spelled-out unit names.
        translations = (
            (WritePrecision.NS, "nanosecond"),
            (WritePrecision.US, "microsecond"),
            (WritePrecision.MS, "millisecond"),
            (WritePrecision.S, "second"),
        )
        for value, name in translations:
            if precision == value:
                return name
        raise ValueError("Unsupported precision '%s'" % precision)
31 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/extras.py:
--------------------------------------------------------------------------------
1 | """Extras to selectively import Pandas or NumPy."""
2 |
3 | try:
4 | import pandas as pd
5 | except ModuleNotFoundError as err:
6 | raise ImportError(f"`query_data_frame` requires Pandas which couldn't be imported due: {err}")
7 |
8 | try:
9 | import numpy as np
10 | except ModuleNotFoundError as err:
11 | raise ImportError(f"`data_frame` requires numpy which couldn't be imported due: {err}")
12 |
13 | __all__ = ['pd', 'np']
14 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/rest.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from __future__ import absolute_import
4 |
5 | import logging
6 | from typing import Dict
7 |
8 | from influxdb_client_3.exceptions import InfluxDBError
9 | from influxdb_client_3.write_client.configuration import Configuration
10 |
# Canonical charset name used when encoding/decoding HTTP bodies.
_UTF_8_encoding = 'utf-8'
12 |
13 |
class ApiException(InfluxDBError):
    """HTTP error raised by the REST layer, carrying the failed response details."""

    def __init__(self, status=None, reason=None, http_resp=None):
        """Initialize with HTTP response."""
        super().__init__(response=http_resp)
        if not http_resp:
            # No response object available - fall back to the explicit arguments.
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None
        else:
            # Prefer the details captured on the actual HTTP response.
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()

    def __str__(self):
        """Get custom error messages for exception."""
        parts = [f"({self.status})\n", f"Reason: {self.reason}\n"]
        if self.headers:
            parts.append(f"HTTP response headers: {self.headers}\n")
        if self.body:
            parts.append(f"HTTP response body: {self.body}\n")
        return "".join(parts)
42 |
43 |
44 | class _BaseRESTClient(object):
45 | logger = logging.getLogger('influxdb_client.client.http')
46 |
47 | @staticmethod
48 | def log_request(method: str, url: str):
49 | _BaseRESTClient.logger.debug(f">>> Request: '{method} {url}'")
50 |
51 | @staticmethod
52 | def log_response(status: str):
53 | _BaseRESTClient.logger.debug(f"<<< Response: {status}")
54 |
55 | @staticmethod
56 | def log_body(body: object, prefix: str):
57 | _BaseRESTClient.logger.debug(f"{prefix} Body: {body}")
58 |
59 | @staticmethod
60 | def log_headers(headers: Dict[str, str], prefix: str):
61 | for key, v in headers.items():
62 | value = v
63 | if 'authorization' == key.lower():
64 | value = '***'
65 | _BaseRESTClient.logger.debug(f"{prefix} {key}: {value}")
66 |
67 |
def _requires_create_user_session(configuration: Configuration, cookie: str, resource_path: str):
    """Return truthy when a username/password session has to be created before this call."""
    # The signin/signout endpoints themselves must never trigger session creation.
    session_endpoints = ['/api/v2/signin', '/api/v2/signout']
    return configuration.username and configuration.password and not cookie \
        and resource_path not in session_endpoints
71 |
72 |
def _requires_expire_user_session(configuration: Configuration, cookie: str):
    """Return truthy when an established username/password session should be signed out."""
    has_credentials = configuration.username and configuration.password
    return has_credentials and cookie
75 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/service/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from __future__ import absolute_import
4 |
5 | # import apis into api package
6 | from influxdb_client_3.write_client.service.write_service import WriteService
7 | from influxdb_client_3.write_client.service.signin_service import SigninService
8 | from influxdb_client_3.write_client.service.signout_service import SignoutService
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/service/_base_service.py:
--------------------------------------------------------------------------------
1 |
2 |
# noinspection PyMethodMayBeStatic
class _BaseService(object):
    """Common base for generated API services: parameter validation and server build-type lookup."""

    def __init__(self, api_client=None):
        """Init common services operation."""
        if api_client is None:
            raise ValueError("Invalid value for `api_client`, must be defined.")
        self.api_client = api_client
        # Lazily resolved cache of the server's X-Influxdb-Build header.
        self._build_type = None

    def _check_operation_params(self, operation_id, supported_params, local_params):
        """Validate kwargs of a generated operation and flatten them into ``local_params``.

        :raises TypeError: when an unexpected keyword argument was passed.
        """
        # NOTE: mutates the caller-supplied ``supported_params`` list in place.
        supported_params.append('async_req')
        supported_params.append('_return_http_data_only')
        supported_params.append('_preload_content')
        supported_params.append('_request_timeout')
        supported_params.append('urlopen_kw')
        for key, val in local_params['kwargs'].items():
            if key not in supported_params:
                raise TypeError(
                    f"Got an unexpected keyword argument '{key}'"
                    f" to method {operation_id}"
                )
            local_params[key] = val
        del local_params['kwargs']

    def _is_cloud_instance(self) -> bool:
        """Return True when the connected server reports a Cloud build (result cached)."""
        if not self._build_type:
            self._build_type = self.build_type()
        return 'cloud' in self._build_type.lower()

    async def _is_cloud_instance_async(self) -> bool:
        """Async variant of ``_is_cloud_instance`` (result cached)."""
        if not self._build_type:
            self._build_type = await self.build_type_async()
        return 'cloud' in self._build_type.lower()

    def build_type(self) -> str:
        """
        Return the build type of the connected InfluxDB Server.

        :return: The type of InfluxDB build.
        """
        # NOTE(review): 'write_client' looks like a stale top-level import path;
        # the package ships as influxdb_client_3.write_client - confirm PingService
        # is importable from here.
        from write_client import PingService
        ping_service = PingService(self.api_client)

        response = ping_service.get_ping_with_http_info(_return_http_data_only=False)
        return self.response_header(response, header_name='X-Influxdb-Build')

    async def build_type_async(self) -> str:
        """
        Return the build type of the connected InfluxDB Server.

        :return: The type of InfluxDB build.
        """
        # NOTE(review): see build_type() - the 'write_client' import path looks stale.
        from write_client import PingService
        ping_service = PingService(self.api_client)

        response = await ping_service.get_ping_async(_return_http_data_only=False)
        return self.response_header(response, header_name='X-Influxdb-Build')

    def response_header(self, response, header_name='X-Influxdb-Version') -> str:
        """Extract ``header_name`` from a ``(data, status, headers)`` response triple.

        Returns "unknown" when the header cannot be found.
        """
        if response is not None and len(response) >= 3:
            if header_name in response[2]:
                return response[2][header_name]

        return "unknown"
68 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/service/signin_service.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from __future__ import absolute_import
4 |
5 | import re # noqa: F401
6 |
7 | from influxdb_client_3.write_client.service._base_service import _BaseService
8 |
9 |
10 | class SigninService(_BaseService):
11 |
    def __init__(self, api_client=None):  # noqa: E501,D401,D403
        """SigninService - an operation defined in OpenAPI."""
        super().__init__(api_client)
15 |
16 | def post_signin(self, **kwargs): # noqa: E501,D401,D403
17 | """Create a user session..
18 |
19 | Authenticates [Basic authentication credentials](#section/Authentication/BasicAuthentication) for a [user](https://docs.influxdata.com/influxdb/latest/reference/glossary/#user), and then, if successful, generates a user session. To authenticate a user, pass the HTTP `Authorization` header with the `Basic` scheme and the base64-encoded username and password. For syntax and more information, see [Basic Authentication](#section/Authentication/BasicAuthentication) for syntax and more information. If authentication is successful, InfluxDB creates a new session for the user and then returns the session cookie in the `Set-Cookie` response header. InfluxDB stores user sessions in memory only. They expire within ten minutes and during restarts of the InfluxDB instance. #### User sessions with authorizations - In InfluxDB Cloud, a user session inherits all the user's permissions for the organization. - In InfluxDB OSS, a user session inherits all the user's permissions for all the organizations that the user belongs to. #### Related endpoints - [Signout](#tag/Signout)
20 | This method makes a synchronous HTTP request by default. To make an
21 | asynchronous HTTP request, please pass async_req=True
22 | >>> thread = api.post_signin(async_req=True)
23 | >>> result = thread.get()
24 |
25 | :param async_req bool
26 | :param str zap_trace_span: OpenTracing span context
27 | :param str authorization: An auth credential for the Basic scheme
28 | :return: None
29 | If the method is called asynchronously,
30 | returns the request thread.
31 | """ # noqa: E501
32 | kwargs['_return_http_data_only'] = True
33 | if kwargs.get('async_req'):
34 | return self.post_signin_with_http_info(**kwargs) # noqa: E501
35 | else:
36 | (data) = self.post_signin_with_http_info(**kwargs) # noqa: E501
37 | return data
38 |
39 | def post_signin_with_http_info(self, **kwargs): # noqa: E501,D401,D403
40 | """Create a user session..
41 |
42 | Authenticates [Basic authentication credentials](#section/Authentication/BasicAuthentication) for a [user](https://docs.influxdata.com/influxdb/latest/reference/glossary/#user), and then, if successful, generates a user session. To authenticate a user, pass the HTTP `Authorization` header with the `Basic` scheme and the base64-encoded username and password. For syntax and more information, see [Basic Authentication](#section/Authentication/BasicAuthentication) for syntax and more information. If authentication is successful, InfluxDB creates a new session for the user and then returns the session cookie in the `Set-Cookie` response header. InfluxDB stores user sessions in memory only. They expire within ten minutes and during restarts of the InfluxDB instance. #### User sessions with authorizations - In InfluxDB Cloud, a user session inherits all the user's permissions for the organization. - In InfluxDB OSS, a user session inherits all the user's permissions for all the organizations that the user belongs to. #### Related endpoints - [Signout](#tag/Signout)
43 | This method makes a synchronous HTTP request by default. To make an
44 | asynchronous HTTP request, please pass async_req=True
45 | >>> thread = api.post_signin_with_http_info(async_req=True)
46 | >>> result = thread.get()
47 |
48 | :param async_req bool
49 | :param str zap_trace_span: OpenTracing span context
50 | :param str authorization: An auth credential for the Basic scheme
51 | :return: None
52 | If the method is called asynchronously,
53 | returns the request thread.
54 | """ # noqa: E501
55 | local_var_params, path_params, query_params, header_params, body_params = \
56 | self._post_signin_prepare(**kwargs) # noqa: E501
57 |
58 | return self.api_client.call_api(
59 | '/api/v2/signin', 'POST',
60 | path_params,
61 | query_params,
62 | header_params,
63 | body=body_params,
64 | post_params=[],
65 | files={},
66 | response_type=None, # noqa: E501
67 | auth_settings=['BasicAuthentication'],
68 | async_req=local_var_params.get('async_req'),
69 | _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
70 | _preload_content=local_var_params.get('_preload_content', True),
71 | _request_timeout=local_var_params.get('_request_timeout'),
72 | collection_formats={},
73 | urlopen_kw=kwargs.get('urlopen_kw', None))
74 |
75 | async def post_signin_async(self, **kwargs): # noqa: E501,D401,D403
76 | """Create a user session..
77 |
78 | Authenticates [Basic authentication credentials](#section/Authentication/BasicAuthentication) for a [user](https://docs.influxdata.com/influxdb/latest/reference/glossary/#user), and then, if successful, generates a user session. To authenticate a user, pass the HTTP `Authorization` header with the `Basic` scheme and the base64-encoded username and password. For syntax and more information, see [Basic Authentication](#section/Authentication/BasicAuthentication) for syntax and more information. If authentication is successful, InfluxDB creates a new session for the user and then returns the session cookie in the `Set-Cookie` response header. InfluxDB stores user sessions in memory only. They expire within ten minutes and during restarts of the InfluxDB instance. #### User sessions with authorizations - In InfluxDB Cloud, a user session inherits all the user's permissions for the organization. - In InfluxDB OSS, a user session inherits all the user's permissions for all the organizations that the user belongs to. #### Related endpoints - [Signout](#tag/Signout)
79 | This method makes an asynchronous HTTP request.
80 |
81 | :param async_req bool
82 | :param str zap_trace_span: OpenTracing span context
83 | :param str authorization: An auth credential for the Basic scheme
84 | :return: None
85 | If the method is called asynchronously,
86 | returns the request thread.
87 | """ # noqa: E501
88 | local_var_params, path_params, query_params, header_params, body_params = \
89 | self._post_signin_prepare(**kwargs) # noqa: E501
90 |
91 | return await self.api_client.call_api(
92 | '/api/v2/signin', 'POST',
93 | path_params,
94 | query_params,
95 | header_params,
96 | body=body_params,
97 | post_params=[],
98 | files={},
99 | response_type=None, # noqa: E501
100 | auth_settings=['BasicAuthentication'],
101 | async_req=local_var_params.get('async_req'),
102 | _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
103 | _preload_content=local_var_params.get('_preload_content', True),
104 | _request_timeout=local_var_params.get('_request_timeout'),
105 | collection_formats={},
106 | urlopen_kw=kwargs.get('urlopen_kw', None))
107 |
108 | def _post_signin_prepare(self, **kwargs): # noqa: E501,D401,D403
109 | local_var_params = dict(locals())
110 |
111 | all_params = ['zap_trace_span', 'authorization'] # noqa: E501
112 | self._check_operation_params('post_signin', all_params, local_var_params)
113 |
114 | path_params = {}
115 |
116 | query_params = []
117 |
118 | header_params = {}
119 | if 'zap_trace_span' in local_var_params:
120 | header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
121 | if 'authorization' in local_var_params:
122 | header_params['Authorization'] = local_var_params['authorization'] # noqa: E501
123 |
124 | body_params = None
125 | # HTTP header `Accept`
126 | header_params['Accept'] = self.api_client.select_header_accept(
127 | ['application/json']) # noqa: E501
128 |
129 | return local_var_params, path_params, query_params, header_params, body_params
130 |
--------------------------------------------------------------------------------
/influxdb_client_3/write_client/service/signout_service.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | from __future__ import absolute_import
4 |
5 | import re # noqa: F401
6 |
7 | from influxdb_client_3.write_client.service._base_service import _BaseService
8 |
9 |
class SignoutService(_BaseService):
    """Generated service for the ``/api/v2/signout`` endpoint."""

    def __init__(self, api_client=None):  # noqa: E501,D401,D403
        """SignoutService - an operation defined in OpenAPI."""
        super().__init__(api_client)

    def post_signout(self, **kwargs):  # noqa: E501,D401,D403
        """Expire a user session.

        Expires the user session identified by the request's session cookie
        (created via ``POST /api/v2/signin``). Sessions are held in memory
        only and expire automatically within ten minutes or when the
        InfluxDB instance restarts.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        :param async_req bool
        :param str zap_trace_span: OpenTracing span context
        :return: None, or the request thread when called asynchronously.
        """  # noqa: E501
        kwargs['_return_http_data_only'] = True
        # The sync and async_req code paths both delegate to the same call.
        return self.post_signout_with_http_info(**kwargs)  # noqa: E501

    def post_signout_with_http_info(self, **kwargs):  # noqa: E501,D401,D403
        """Expire a user session and return the full HTTP response info.

        Accepts the same keyword arguments as :meth:`post_signout`.
        """  # noqa: E501
        prepared = self._post_signout_prepare(**kwargs)  # noqa: E501
        local_var_params, path_params, query_params, header_params, body_params = prepared

        return self.api_client.call_api(
            '/api/v2/signout', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=[],
            files={},
            response_type=None,  # noqa: E501
            auth_settings=[],
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats={},
            urlopen_kw=kwargs.get('urlopen_kw', None))

    async def post_signout_async(self, **kwargs):  # noqa: E501,D401,D403
        """Expire a user session (asynchronous variant).

        Accepts the same keyword arguments as :meth:`post_signout`.
        """  # noqa: E501
        prepared = self._post_signout_prepare(**kwargs)  # noqa: E501
        local_var_params, path_params, query_params, header_params, body_params = prepared

        return await self.api_client.call_api(
            '/api/v2/signout', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=[],
            files={},
            response_type=None,  # noqa: E501
            auth_settings=[],
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats={},
            urlopen_kw=kwargs.get('urlopen_kw', None))

    def _post_signout_prepare(self, **kwargs):  # noqa: E501,D401,D403
        """Validate kwargs and build path/query/header/body parameters."""
        # Capture locals() first so that only ``self`` and ``kwargs`` are
        # present — the shape _check_operation_params works on.
        local_var_params = dict(locals())

        self._check_operation_params('post_signout',
                                     ['zap_trace_span'],
                                     local_var_params)

        path_params = {}

        query_params = []

        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        return local_var_params, path_params, query_params, header_params, body_params
125 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 | integration: marks integration tests (deselect with '-m "not integration"')
4 |
--------------------------------------------------------------------------------
/python-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InfluxCommunity/influxdb3-python/7b9e8027abc22eeb9ce7236f46b68aa31b706c2b/python-logo.png
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | import os
3 | import re
4 |
# Runtime dependencies required by the client at install time.
requires = [
    'reactivex >= 4.0.4',
    'certifi >= 14.05.14',
    'python_dateutil >= 2.5.3',
    'setuptools >= 21.0.0',
    'urllib3 >= 1.26.0',
    'pyarrow >= 8.0.0'
]

# The README is used verbatim as the PyPI long description.
with open("./README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
16 |
17 |
def get_version_from_github_ref():
    """Extract a semantic version from the GITHUB_REF environment variable.

    Returns the ``X.Y.Z`` portion of a ``refs/tags/vX.Y.Z`` ref, or None
    when GITHUB_REF is unset or does not start with a version tag.
    """
    github_ref = os.environ.get("GITHUB_REF")
    if not github_ref:
        return None

    match = re.match(r"refs/tags/v(\d+\.\d+\.\d+)", github_ref)
    return match.group(1) if match else None


def get_version():
    """Return the package version: the CI tag if present, else a placeholder."""
    # When building from a GitHub Actions tag push, use that tag's version.
    tagged = get_version_from_github_ref()
    if tagged:
        return tagged

    # Local / non-tag builds fall back to a stub version.
    return "v0.0.0"
38 |
39 |
# Distribution metadata; the version is derived from the CI tag when present.
setup(
    name='influxdb3-python',
    version=get_version(),
    description='Community Python client for InfluxDB 3.0',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='InfluxData',
    author_email='contact@influxdata.com',
    url='https://github.com/InfluxCommunity/influxdb3-python',
    packages=find_packages(exclude=['tests', 'tests.*', 'examples', 'examples.*']),
    package_data={'influxdb_client_3': ['py.typed']},
    # Optional feature sets, e.g. ``pip install influxdb3-python[pandas]``.
    extras_require={
        'pandas': ['pandas'],
        'polars': ['polars'],
        'dataframe': ['pandas', 'polars'],
        'test': ['pytest', 'pytest-cov', 'pytest-httpserver']
    },
    install_requires=requires,
    python_requires='>=3.8',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: 3.12',
        'Programming Language :: Python :: 3.13',
    ]
)
71 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # needed to resolve some module imports when running pytest
2 |
--------------------------------------------------------------------------------
/tests/data/iot.csv:
--------------------------------------------------------------------------------
1 | name,building,temperature,time
2 | iot-devices,5a,72.3,2022-10-01T12:01:00Z
3 | iot-devices,5a,72.1,2022-10-02T12:01:00Z
4 | iot-devices,5a,72.2,2022-10-03T12:01:00Z
5 |
--------------------------------------------------------------------------------
/tests/test_api_client.py:
--------------------------------------------------------------------------------
1 | import json
2 | import unittest
3 | import uuid
4 | from unittest import mock
5 | from urllib3 import response
6 |
7 | from influxdb_client_3.write_client._sync.api_client import ApiClient
8 | from influxdb_client_3.write_client.configuration import Configuration
9 | from influxdb_client_3.exceptions import InfluxDBError
10 | from influxdb_client_3.write_client.service import WriteService
11 | from influxdb_client_3.version import VERSION
12 |
13 | _package = "influxdb3-python"
14 | _sentHeaders = {}
15 |
16 |
def mock_rest_request(method,
                      url,
                      query_params=None,
                      headers=None,
                      body=None,
                      post_params=None,
                      _preload_content=True,
                      _request_timeout=None,
                      **urlopen_kw):
    """Stand-in for RESTClientObject.request that records the sent headers.

    Stores *headers* in the module-level ``_sentHeaders`` so tests can
    inspect what a request carried, then returns a minimal 200 response.
    """
    class MockResponse:
        """Minimal response exposing ``data`` and ``status_code`` attributes."""

        def __init__(self, data, status_code):
            # BUGFIX: the original class also defined a ``data()`` method,
            # which this attribute assignment shadowed — the method was dead
            # code (and would have recursed if reachable), so it is removed.
            self.data = data
            self.status_code = status_code

    global _sentHeaders
    _sentHeaders = headers

    return MockResponse(None, 200)
38 |
39 |
class ApiClientTests(unittest.TestCase):
    """Tests for ApiClient header handling, error mapping, and gzip logic."""

    def test_default_headers(self):
        """Default headers must include User-Agent and the configured auth header."""
        global _package
        conf = Configuration()
        client = ApiClient(conf,
                           header_name="Authorization",
                           header_value="Bearer TEST_TOKEN")
        self.assertIsNotNone(client.default_headers["User-Agent"])
        self.assertIsNotNone(client.default_headers["Authorization"])
        self.assertEqual(f"{_package}/{VERSION}", client.default_headers["User-Agent"])
        self.assertEqual("Bearer TEST_TOKEN", client.default_headers["Authorization"])

    @mock.patch("influxdb_client_3.write_client._sync.rest.RESTClientObject.request",
                side_effect=mock_rest_request)
    def test_call_api(self, mock_post):
        """A write request sends exactly four headers with the expected values."""
        global _package
        global _sentHeaders
        # Reset the capture slot filled by mock_rest_request.
        _sentHeaders = {}

        conf = Configuration()
        client = ApiClient(conf,
                           header_name="Authorization",
                           header_value="Bearer TEST_TOKEN")
        service = WriteService(client)
        service.post_write("TEST_ORG", "TEST_BUCKET", "data,foo=bar val=3.14")
        self.assertEqual(4, len(_sentHeaders.keys()))
        self.assertIsNotNone(_sentHeaders["Accept"])
        self.assertEqual("application/json", _sentHeaders["Accept"])
        self.assertIsNotNone(_sentHeaders["Content-Type"])
        self.assertEqual("text/plain", _sentHeaders["Content-Type"])
        self.assertIsNotNone(_sentHeaders["Authorization"])
        self.assertEqual("Bearer TEST_TOKEN", _sentHeaders["Authorization"])
        self.assertIsNotNone(_sentHeaders["User-Agent"])
        self.assertEqual(f"{_package}/{VERSION}", _sentHeaders["User-Agent"])

    def _test_api_error(self, body):
        """Issue a write against a mocked HTTP 400 response carrying *body*."""
        conf = Configuration()
        client = ApiClient(conf)
        client.rest_client.pool_manager.request \
            = mock.Mock(return_value=response.HTTPResponse(status=400,
                                                           reason='Bad Request',
                                                           body=body.encode()))
        service = WriteService(client)
        service.post_write("TEST_ORG", "TEST_BUCKET", "data,foo=bar val=3.14")

    def test_api_error_cloud(self):
        """Cloud-style errors put the message under the ``message`` key."""
        response_body = '{"message": "parsing failed for write_lp endpoint"}'
        with self.assertRaises(InfluxDBError) as err:
            self._test_api_error(response_body)
        self.assertEqual('parsing failed for write_lp endpoint', err.exception.message)

    def test_api_error_oss_without_detail(self):
        """OSS-style errors put the message under the ``error`` key."""
        response_body = '{"error": "parsing failed for write_lp endpoint"}'
        with self.assertRaises(InfluxDBError) as err:
            self._test_api_error(response_body)
        self.assertEqual('parsing failed for write_lp endpoint', err.exception.message)

    def test_api_error_oss_with_detail(self):
        """A nested ``data.error_message`` takes precedence over ``error``."""
        response_body = ('{"error":"parsing failed for write_lp endpoint","data":{"error_message":"invalid field value '
                         'in line protocol for field \'val\' on line 1"}}')
        with self.assertRaises(InfluxDBError) as err:
            self._test_api_error(response_body)
        self.assertEqual('invalid field value in line protocol for field \'val\' on line 1', err.exception.message)

    def test_api_error_unknown(self):
        """Unrecognised error shapes fall back to the raw response body."""
        response_body = '{"detail":"no info"}'
        with self.assertRaises(InfluxDBError) as err:
            self._test_api_error(response_body)
        self.assertEqual(response_body, err.exception.message)

    def test_api_error_headers(self):
        """Response headers must be preserved on the raised InfluxDBError."""
        body = '{"error": "test error"}'
        body_dic = json.loads(body)
        conf = Configuration()
        local_client = ApiClient(conf)
        traceid = "123456789ABCDEF0"
        requestid = uuid.uuid4().__str__()

        local_client.rest_client.pool_manager.request = mock.Mock(
            return_value=response.HTTPResponse(
                status=400,
                reason='Bad Request',
                headers={
                    'Trace-Id': traceid,
                    'Trace-Sampled': 'false',
                    'X-Influxdb-Request-Id': requestid,
                    'X-Influxdb-Build': 'Mock'
                },
                body=body.encode()
            )
        )
        with self.assertRaises(InfluxDBError) as err:
            service = WriteService(local_client)
            service.post_write("TEST_ORG", "TEST_BUCKET", "data,foo=bar val=3.14")
        self.assertEqual(body_dic['error'], err.exception.message)
        headers = err.exception.getheaders()
        self.assertEqual(4, len(headers))
        self.assertEqual(headers['Trace-Id'], traceid)
        self.assertEqual(headers['Trace-Sampled'], 'false')
        self.assertEqual(headers['X-Influxdb-Request-Id'], requestid)
        self.assertEqual(headers['X-Influxdb-Build'], 'Mock')

    def test_should_gzip(self):
        """should_gzip depends on the enable flag, the threshold, and payload size."""
        # Test when gzip is disabled
        self.assertFalse(ApiClient.should_gzip("test", enable_gzip=False, gzip_threshold=1))
        self.assertFalse(ApiClient.should_gzip("test", enable_gzip=False, gzip_threshold=10000))
        self.assertFalse(ApiClient.should_gzip("test", enable_gzip=False, gzip_threshold=None))

        # Test when enable_gzip is True
        self.assertTrue(ApiClient.should_gzip("test", enable_gzip=True, gzip_threshold=None))
        self.assertTrue(ApiClient.should_gzip("test", enable_gzip=True, gzip_threshold=1))
        self.assertFalse(ApiClient.should_gzip("test", enable_gzip=True, gzip_threshold=100000))

        # Test payload smaller than threshold
        self.assertFalse(ApiClient.should_gzip("test", enable_gzip=True, gzip_threshold=10000))

        # Test payload larger than threshold
        large_payload = "x" * 10000
        self.assertTrue(ApiClient.should_gzip(large_payload, enable_gzip=True, gzip_threshold=1000))

        # Test exact threshold match and less than threshold
        payload = "x" * 1000
        self.assertTrue(ApiClient.should_gzip(payload, enable_gzip=True, gzip_threshold=1000))
164 |
--------------------------------------------------------------------------------
/tests/test_date_helper.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from datetime import datetime, timezone
3 |
4 | from dateutil import tz
5 |
6 | from influxdb_client_3.write_client.client.util.date_utils import DateHelper, get_date_helper
7 |
8 |
class TestDateHelper(unittest.TestCase):
    """Tests for DateHelper UTC conversion and ISO-8601 parsing."""

    def test_to_utc(self):
        """The default helper attaches UTC to a naive datetime unchanged."""
        result = get_date_helper().to_utc(datetime(2021, 4, 29, 20, 30, 10, 0))
        self.assertEqual(datetime(2021, 4, 29, 20, 30, 10, 0, timezone.utc), result)

    def test_to_utc_different_timezone(self):
        """A helper configured with GMT+2 shifts the naive datetime accordingly."""
        helper = DateHelper(timezone=tz.gettz('ETC/GMT+2'))
        result = helper.to_utc(datetime(2021, 4, 29, 20, 30, 10, 0))
        self.assertEqual(datetime(2021, 4, 29, 22, 30, 10, 0, timezone.utc), result)

    def test_parse(self):
        """An RFC3339 timestamp parses into an aware UTC datetime."""
        result = get_date_helper().parse_date("2021-03-20T15:59:10.607352Z")
        self.assertEqual(datetime(2021, 3, 20, 15, 59, 10, 607352, timezone.utc), result)
22 |
--------------------------------------------------------------------------------
/tests/test_deep_merge.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import influxdb_client_3
4 |
5 |
class TestDeepMerge(unittest.TestCase):
    """Behavioural tests for ``influxdb_client_3._deep_merge``."""

    def test_deep_merge_dicts_with_no_overlap(self):
        merged = influxdb_client_3._deep_merge({"a": 1, "b": 2}, {"c": 3, "d": 4})
        self.assertEqual(merged, {"a": 1, "b": 2, "c": 3, "d": 4})

    def test_deep_merge_dicts_with_overlap(self):
        # Overlapping keys take the source's value.
        merged = influxdb_client_3._deep_merge({"a": 1, "b": 2}, {"b": 3, "c": 4})
        self.assertEqual(merged, {"a": 1, "b": 3, "c": 4})

    def test_deep_merge_nested_dicts(self):
        # Nested dicts are merged recursively, not replaced wholesale.
        merged = influxdb_client_3._deep_merge({"a": {"b": 1}}, {"a": {"c": 2}})
        self.assertEqual(merged, {"a": {"b": 1, "c": 2}})

    def test_deep_merge_lists(self):
        # Lists are concatenated.
        merged = influxdb_client_3._deep_merge([1, 2], [3, 4])
        self.assertEqual(merged, [1, 2, 3, 4])

    def test_deep_merge_non_overlapping_types(self):
        # Mismatched container types: the source wins.
        merged = influxdb_client_3._deep_merge({"a": 1}, [2, 3])
        self.assertEqual(merged, [2, 3])

    def test_deep_merge_none_to_flight(self):
        # Merging None leaves flight-style options untouched.
        options = {
            "headers": [(b"authorization", "Bearer xyz".encode('utf-8'))],
            "timeout": 300
        }
        merged = influxdb_client_3._deep_merge(options, None)
        self.assertEqual(merged, options)

    def test_deep_merge_empty_to_flight(self):
        # Merging an empty dict leaves flight-style options untouched.
        options = {
            "headers": [(b"authorization", "Bearer xyz".encode('utf-8'))],
            "timeout": 300
        }
        merged = influxdb_client_3._deep_merge(options, {})
        self.assertEqual(merged, options)
55 |
--------------------------------------------------------------------------------
/tests/test_merge_options.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import influxdb_client_3
4 |
5 |
class TestMergeOptions(unittest.TestCase):
    """Behavioural tests for ``influxdb_client_3._merge_options``."""

    def test_merge_with_empty_custom(self):
        # An empty custom mapping leaves the defaults unchanged.
        defaults = {"a": 1, "b": 2}
        merged = influxdb_client_3._merge_options(defaults, custom={})
        self.assertEqual(merged, defaults)

    def test_merge_with_none_custom(self):
        # No custom mapping at all also leaves the defaults unchanged.
        defaults = {"a": 1, "b": 2}
        merged = influxdb_client_3._merge_options(defaults, custom=None)
        self.assertEqual(merged, defaults)

    def test_merge_with_no_excluded_keys(self):
        # Custom values override defaults on overlapping keys.
        merged = influxdb_client_3._merge_options({"a": 1, "b": 2}, custom={"b": 3, "c": 4})
        self.assertEqual(merged, {"a": 1, "b": 3, "c": 4})

    def test_merge_with_excluded_keys(self):
        # Excluded keys keep their default value even when customised.
        merged = influxdb_client_3._merge_options({"a": 1, "b": 2}, exclude_keys=["b"], custom={"b": 3, "c": 4})
        self.assertEqual(merged, {"a": 1, "b": 2, "c": 4})
29 |
--------------------------------------------------------------------------------
/tests/test_point.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import unittest
3 |
4 | from influxdb_client_3.write_client.client.write.point import EPOCH, Point
5 |
6 |
class TestPoint(unittest.TestCase):
    """Sanity checks for the Point line-protocol builder."""

    def test_epoch(self):
        """EPOCH must be the Unix epoch as an aware UTC datetime."""
        expected = datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(EPOCH, expected)

    def test_point(self):
        """Tag, field and timestamp serialize into line protocol."""
        point = (Point.measurement("h2o")
                 .tag("location", "europe")
                 .field("level", 2.2)
                 .time(1_000_000))
        self.assertEqual('h2o,location=europe level=2.2 1000000', point.to_line_protocol())
15 |
--------------------------------------------------------------------------------
/tests/test_polars.py:
--------------------------------------------------------------------------------
1 | import importlib.util
2 | import time
3 | import unittest
4 | from unittest.mock import Mock, ANY
5 |
6 | from influxdb_client_3 import PointSettings, InfluxDBClient3, write_client_options, WriteOptions
7 | from influxdb_client_3.write_client import WriteService
8 | from influxdb_client_3.write_client.client.write.polars_dataframe_serializer import polars_data_frame_to_list_of_points
9 |
10 |
@unittest.skipIf(importlib.util.find_spec("polars") is None, 'Polars package not installed')
class TestPolarsDataFrameSerializer(unittest.TestCase):
    """Tests converting a polars DataFrame into line-protocol points."""

    def test_to_list_of_points(self):
        import polars as pl
        settings = PointSettings()
        frame = pl.DataFrame(data={
            "name": ['iot-devices', 'iot-devices', 'iot-devices'],
            "building": ['5a', '5a', '5a'],
            "temperature": [72.3, 72.1, 72.2],
            "time": pl.Series(["2022-10-01T12:01:00Z", "2022-10-02T12:01:00Z", "2022-10-03T12:01:00Z"])
            .str.to_datetime(time_unit='ns')
        })
        expected = [
            'iot-devices,building=5a name="iot-devices",temperature=72.3 1664625660000000000',
            'iot-devices,building=5a name="iot-devices",temperature=72.1 1664712060000000000',
            'iot-devices,building=5a name="iot-devices",temperature=72.2 1664798460000000000'
        ]

        actual = polars_data_frame_to_list_of_points(
            frame, settings,
            data_frame_measurement_name='iot-devices',
            data_frame_tag_columns=['building'],
            data_frame_timestamp_column='time')

        self.assertEqual(expected, actual)
35 |
36 |
@unittest.skipIf(importlib.util.find_spec("polars") is None, 'Polars package not installed')
class TestWritePolars(unittest.TestCase):
    """Write-path tests driving InfluxDBClient3 with polars DataFrames.

    The underlying WriteService is replaced with a Mock so the line-protocol
    body handed to ``post_write`` can be inspected without any network I/O.
    """

    def setUp(self):
        # Fresh client per test; its transport is mocked inside each test.
        self.client = InfluxDBClient3(
            host="localhost",
            org="my_org",
            database="my_db",
            token="my_token"
        )

    def test_write_polars(self):
        """A direct (non-batching) write serializes both rows into one body."""
        import polars as pl
        df = pl.DataFrame({
            "time": pl.Series(["2024-08-01 00:00:00", "2024-08-01 01:00:00"]).str.to_datetime(time_unit='ns'),
            "temperature": [22.4, 21.8],
        })
        self.client._write_api._write_service = Mock(spec=WriteService)

        self.client.write(
            database="database",
            record=df,
            data_frame_measurement_name="measurement",
            data_frame_timestamp_column="time",
        )

        # Inspect the line-protocol body passed to the mocked service.
        actual = self.client._write_api._write_service.post_write.call_args[1]['body']
        self.assertEqual(b'measurement temperature=22.4 1722470400000000000\n'
                         b'measurement temperature=21.8 1722474000000000000', actual)

    def test_write_polars_batching(self):
        """With batch_size=2, both rows are flushed in a single batched write."""
        import polars as pl
        df = pl.DataFrame({
            "time": pl.Series(["2024-08-01 00:00:00", "2024-08-01 01:00:00"]).str.to_datetime(time_unit='ns'),
            "temperature": [22.4, 21.8],
        })
        # Rebuild the client with batching write options enabled.
        self.client = InfluxDBClient3(
            host="localhost",
            org="my_org",
            database="my_db",
            token="my_token", write_client_options=write_client_options(
                write_options=WriteOptions(batch_size=2)
            )
        )
        self.client._write_api._write_options = WriteOptions(batch_size=2)
        self.client._write_api._write_service = Mock(spec=WriteService)

        self.client.write(
            database="database",
            record=df,
            data_frame_measurement_name="measurement",
            data_frame_timestamp_column="time",
        )

        # Give the background batching thread time to flush before asserting.
        time.sleep(0.5)
        self.client._write_api._write_service.post_write.assert_called_once_with(
            org=ANY,
            bucket=ANY,
            precision=ANY,
            no_sync=ANY,
            async_req=ANY,
            content_type=ANY,
            urlopen_kw=ANY,
            body=b'measurement temperature=22.4 1722470400000000000\nmeasurement temperature=21.8 1722474000000000000')
100 |
--------------------------------------------------------------------------------
/tests/test_write_file.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import Mock
3 |
4 | import pandas as pd
5 |
6 | from influxdb_client_3 import InfluxDBClient3
7 | from influxdb_client_3.write_client.client.write_api import WriteApi
8 |
9 |
def assert_dataframe_equal(a, b, msg=None):
    """Equality hook for unittest.addTypeEqualityFunc: delegate DataFrame
    comparison to pandas (raises AssertionError on mismatch).

    :param a: first DataFrame
    :param b: second DataFrame
    :param msg: unused; required by the addTypeEqualityFunc signature
    """
    import pandas.testing
    pandas.testing.assert_frame_equal(a, b)
13 |
14 |
class TestWriteFile(unittest.TestCase):
    """write_file(): parsed file contents are forwarded to the write API."""

    def setUp(self):
        self.client = InfluxDBClient3(
            host="localhost",
            org="my_org",
            database="my_db",
            token="my_token"
        )
        self.addTypeEqualityFunc(pd.DataFrame, assert_dataframe_equal)

    def test_write_file_csv(self):
        """A CSV import is parsed into a DataFrame and passed with the right kwargs."""
        write_mock = Mock(spec=WriteApi)
        self.client._write_api.write = write_mock.write

        self.client.write_file(file='tests/data/iot.csv', timestamp_column='time', measurement_name="iot-devices",
                               tag_columns=["building"], write_precision='s')

        expected_df = pd.DataFrame({
            "name": ['iot-devices', 'iot-devices', 'iot-devices'],
            "building": ['5a', '5a', '5a'],
            "temperature": [72.3, 72.1, 72.2],
            "time": pd.to_datetime(["2022-10-01T12:01:00Z", "2022-10-02T12:01:00Z", "2022-10-03T12:01:00Z"])
            .astype('datetime64[s, UTC]'),
        })
        expected = {
            'bucket': 'my_db',
            'record': expected_df,
            'data_frame_measurement_name': 'iot-devices',
            'data_frame_tag_columns': ['building'],
            'data_frame_timestamp_column': 'time',
            'write_precision': 's'
        }

        assert write_mock.write.call_count == 1
        _, actual = write_mock.write.call_args
        assert expected_df.equals(actual['record'])

        # The dataframes compare equal via the custom equality function
        # registered in setUp, but self.assertEqual(expected, actual) still
        # fails when the dataframe is nested inside a dict. So compare the
        # dataframes separately (above) and drop them before comparing the
        # remaining call kwargs.
        del expected['record']
        del actual['record']
        assert actual == expected
67 |
--------------------------------------------------------------------------------
/tests/test_write_local_server.py:
--------------------------------------------------------------------------------
1 | import re
2 | from http import HTTPStatus
3 |
4 | import pytest
5 | from pytest_httpserver import HTTPServer, RequestMatcher
6 |
7 | from influxdb_client_3 import InfluxDBClient3, WriteOptions, WritePrecision, write_client_options, WriteType
8 | from influxdb_client_3.write_client.rest import ApiException
9 |
10 |
class TestWriteLocalServer:
    """Write-path tests against a local pytest-httpserver instance.

    Each test points a client at the test server and asserts that the
    expected endpoint, query string and headers were used.
    """

    SAMPLE_RECORD = "mem,tag=one value=1.0"

    @staticmethod
    def set_response_status(httpserver, response_status_code):
        """Configure the server to answer every request with the given status."""
        httpserver.expect_request(re.compile(".*")).respond_with_data(status=response_status_code)

    @staticmethod
    def assert_request_made(httpserver, matcher):
        """Assert that a request matching *matcher* reached the server."""
        httpserver.assert_request_made(matcher)
        httpserver.check_assertions()

    @staticmethod
    def _make_client(httpserver, write_options, **kwargs):
        """Build a client against the test server with the given write options.

        Shared by all tests to avoid repeating the connection boilerplate.

        :param httpserver: the pytest-httpserver fixture
        :param write_options: WriteOptions for this client
        :param kwargs: extra InfluxDBClient3 arguments (e.g. enable_gzip)
        """
        return InfluxDBClient3(
            host=httpserver.url_for("/"), org="ORG", database="DB", token="TOKEN",
            write_client_options=write_client_options(write_options=write_options),
            **kwargs
        )

    def test_write_default_params(self, httpserver: HTTPServer):
        self.set_response_status(httpserver, 200)

        self._make_client(
            httpserver, WriteOptions(write_type=WriteType.synchronous)
        ).write(self.SAMPLE_RECORD)

        self.assert_request_made(httpserver, RequestMatcher(
            method="POST", uri="/api/v2/write",
            query_string={"org": "ORG", "bucket": "DB", "precision": "ns"}))

    def test_write_with_write_options(self, httpserver: HTTPServer):
        self.set_response_status(httpserver, 200)

        self._make_client(
            httpserver,
            WriteOptions(
                write_type=WriteType.synchronous,
                write_precision=WritePrecision.US,
                no_sync=False
            )
        ).write(self.SAMPLE_RECORD)

        self.assert_request_made(httpserver, RequestMatcher(
            method="POST", uri="/api/v2/write",
            query_string={"org": "ORG", "bucket": "DB", "precision": "us"}))

    def test_write_with_no_sync_true(self, httpserver: HTTPServer):
        self.set_response_status(httpserver, 200)

        self._make_client(
            httpserver,
            WriteOptions(
                write_type=WriteType.synchronous,
                write_precision=WritePrecision.US,
                no_sync=True
            )
        ).write(self.SAMPLE_RECORD)

        # no_sync=True routes the write to the v3 endpoint with long-form precision.
        self.assert_request_made(httpserver, RequestMatcher(
            method="POST", uri="/api/v3/write_lp",
            query_string={"org": "ORG", "db": "DB", "precision": "microsecond", "no_sync": "true"}))

    def test_write_with_no_sync_true_on_v2_server(self, httpserver: HTTPServer):
        # A v2-only server answers 405 for the v3 endpoint; the client should
        # surface that as a descriptive ApiException.
        self.set_response_status(httpserver, HTTPStatus.METHOD_NOT_ALLOWED)

        client = self._make_client(
            httpserver,
            WriteOptions(
                write_type=WriteType.synchronous,
                no_sync=True
            )
        )

        with pytest.raises(ApiException, match=r".*Server doesn't support write with no_sync=true "
                                               r"\(supported by InfluxDB 3 Core/Enterprise servers only\)."):
            client.write(self.SAMPLE_RECORD)

        self.assert_request_made(httpserver, RequestMatcher(
            method="POST", uri="/api/v3/write_lp",
            query_string={"org": "ORG", "db": "DB", "precision": "nanosecond", "no_sync": "true"}))

    def test_write_with_no_sync_false_and_gzip(self, httpserver: HTTPServer):
        self.set_response_status(httpserver, 200)

        self._make_client(
            httpserver,
            WriteOptions(
                write_type=WriteType.synchronous,
                write_precision=WritePrecision.US,
                no_sync=False
            ),
            enable_gzip=True
        ).write(self.SAMPLE_RECORD)

        self.assert_request_made(httpserver, RequestMatcher(
            method="POST", uri="/api/v2/write",
            query_string={"org": "ORG", "bucket": "DB", "precision": "us"},
            headers={"Content-Encoding": "gzip"}))

    def test_write_with_no_sync_true_and_gzip(self, httpserver: HTTPServer):
        self.set_response_status(httpserver, 200)

        self._make_client(
            httpserver,
            WriteOptions(
                write_type=WriteType.synchronous,
                write_precision=WritePrecision.US,
                no_sync=True
            ),
            enable_gzip=True
        ).write(self.SAMPLE_RECORD)

        self.assert_request_made(httpserver, RequestMatcher(
            method="POST", uri="/api/v3/write_lp",
            query_string={"org": "ORG", "db": "DB", "precision": "microsecond", "no_sync": "true"},
            headers={"Content-Encoding": "gzip"}))
130 |
--------------------------------------------------------------------------------
/tests/test_write_precision_converter.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from influxdb_client_3.write_client.domain.write_precision import WritePrecision
4 | from influxdb_client_3.write_client.domain.write_precision_converter import WritePrecisionConverter
5 |
6 |
class TestWritePrecisionConverter(unittest.TestCase):
    """Conversion of WritePrecision values to v2/v3 API precision strings."""

    def test_to_v2_api_string_valid(self):
        for precision, expected in [
            (WritePrecision.NS, "ns"),
            (WritePrecision.US, "us"),
            (WritePrecision.MS, "ms"),
            (WritePrecision.S, "s"),
        ]:
            self.assertEqual(WritePrecisionConverter.to_v2_api_string(precision), expected)

    def test_to_v2_api_string_unsupported(self):
        for bad in ("invalid_precision", 123):
            with self.assertRaises(ValueError) as err:
                WritePrecisionConverter.to_v2_api_string(bad)
            self.assertIn(f"Unsupported precision '{bad}'", str(err.exception))

    def test_to_v3_api_string_valid(self):
        for precision, expected in [
            (WritePrecision.NS, "nanosecond"),
            (WritePrecision.US, "microsecond"),
            (WritePrecision.MS, "millisecond"),
            (WritePrecision.S, "second"),
        ]:
            self.assertEqual(WritePrecisionConverter.to_v3_api_string(precision), expected)

    def test_to_v3_api_string_unsupported(self):
        for bad in ("unsupported_value", 42):
            with self.assertRaises(ValueError) as err:
                WritePrecisionConverter.to_v3_api_string(bad)
            self.assertIn(f"Unsupported precision '{bad}'", str(err.exception))
38 |
--------------------------------------------------------------------------------
/tests/util/__init__.py:
--------------------------------------------------------------------------------
1 | """Package for tests/util module."""
2 | import asyncio
3 | import inspect
4 | import sys
5 | import traceback
6 |
7 | import pandas
8 |
9 |
def asyncio_run(async_func):
    """
    Decorator for running async test methods synchronously.

    Example

    .. sourcecode:: python

        @asyncio_run
        async def test_my_feature(self):
            await asyncio.sleep(1)
            print("waking...")
            ...

    :param async_func: the coroutine function to wrap
    :return: a synchronous wrapper that executes *async_func* via asyncio.run
    """
    def wrapper(*args, **kwargs):
        try:
            return asyncio.run(async_func(*args, **kwargs))
        except Exception:
            # Surface the full traceback on stderr so failures inside the
            # coroutine are visible in test output, then re-raise. A bare
            # `raise` keeps the original traceback intact (unlike `raise e`,
            # which appends the re-raise frame).
            print(traceback.format_exc(), file=sys.stderr)
            raise

    # Preserve the original signature so test frameworks (e.g. pytest
    # fixture resolution) still see the coroutine's parameters.
    wrapper.__signature__ = inspect.signature(async_func)
    return wrapper
36 |
37 |
def lp_to_py_object(lp: str):
    """
    Parse one line-protocol record into a dict.

    Result format matches the format of objects returned in
    pyarrow.Table.to_pylist — used for verifying test data returned
    from queries.

    :param lp: a lineprotocol formatted string
    :return: a dict of tag/field names to parsed values plus a 'time' key
    """
    parts = lp.split(' ')
    result = {}

    # Tags: the first comma-separated token is the measurement name — skip it.
    for tag in parts[0].split(','):
        if tag == parts[0].split(',')[0]:
            continue
        pieces = tag.split('=')
        result[pieces[0]] = pieces[1]

    # Fields: the trailing character of the raw value encodes its type
    # (digit/'d' -> float, 'i'/'u' -> int, '"' -> string, t/f variants -> bool).
    for field in parts[1].split(','):
        pieces = field.split('=')
        name, raw = pieces[0], pieces[1]
        suffix = raw[len(raw) - 1]
        if suffix in '0123456789' or suffix == 'd':
            result[name] = float(raw)
        elif suffix in ('i', 'u'):
            result[name] = int(raw.replace(suffix, ''))
        elif suffix == '"':
            result[name] = raw.replace('"', '')
        elif suffix in 'eEtTfF':
            # Booleans: 'true'/'TRUE'/'t'/... start with 't' once lowered.
            result[name] = raw.lower()[0] == 't'
        else:
            result[name] = None

    result['time'] = pandas.Timestamp(int(parts[2]))
    return result
88 |
--------------------------------------------------------------------------------
/tests/util/mocks.py:
--------------------------------------------------------------------------------
1 | import json
2 | import struct
3 | import time
4 |
5 | from pyarrow import (
6 | array,
7 | Table,
8 | concat_tables, ArrowException
9 | )
10 | from pyarrow.flight import (
11 | FlightServerBase,
12 | RecordBatchStream,
13 | ServerMiddlewareFactory,
14 | FlightUnauthenticatedError,
15 | ServerMiddleware,
16 | GeneratorStream,
17 | ServerAuthHandler
18 | )
19 |
20 |
class NoopAuthHandler(ServerAuthHandler):
    """A no-op auth handler that accepts every client — as seen in pyarrow tests."""

    def authenticate(self, outgoing, incoming):
        """Perform no handshake at all."""

    def is_valid(self, token):
        """
        Accept any token by returning an empty string.

        N.B. Returning None causes Type error
        :param token: the client-supplied token (ignored)
        :return: an empty string
        """
        return ""
35 |
36 |
def case_insensitive_header_lookup(headers, lkey):
    """Return the value stored in *headers* under *lkey*, matching the key
    case-insensitively.

    :param headers: a mapping of header names to values
    :param lkey: the header name to look up (any case)
    :return: the value of the first matching key, or None if absent
    """
    target = lkey.lower()
    for key in headers:
        if key.lower() == target:
            return headers.get(key)
44 |
45 |
# Module-level holder for the headers of the most recent request, captured by
# the server middleware so tests can inspect them after the call completes.
req_headers = {}


def set_req_headers(headers):
    """Record *headers* as the most recently seen request headers."""
    global req_headers
    req_headers = headers


def get_req_headers():
    """Return the most recently captured request headers."""
    # Reading a module-level name needs no `global` declaration — only
    # assignment does (the original declared it needlessly).
    return req_headers
57 |
58 |
class ConstantData:
    """A fixed three-row dataset (data, reference, value) used by the mock
    Flight servers below."""

    def __init__(self):
        self.data = [
            array(['temp', 'temp', 'temp']),
            array(['kitchen', 'common', 'foyer']),
            array([36.9, 25.7, 9.8])
        ]
        self.names = ['data', 'reference', 'value']

    def to_tuples(self):
        """Return the rows as a list of (data, reference, value) tuples."""
        return [
            tuple(column[row].as_py() for column in self.data)
            for row in range(3)
        ]

    def to_list(self):
        """Return the rows as a list of dicts keyed by column name, matching
        pyarrow.Table.to_pylist output."""
        return [
            {self.names[col]: self.data[col][row].as_py() for col in range(len(self.names))}
            for row in range(len(self.data[0]))
        ]
83 |
84 |
class ConstantFlightServer(FlightServerBase):
    """Mock Flight server that answers do_get with ConstantData plus one
    extra row per key/value pair found in the ticket."""

    def __init__(self, location=None, options=None, **kwargs):
        super().__init__(location, **kwargs)
        self.cd = ConstantData()
        self.options = options

    def do_get(self, context, ticket):
        """Respond with the constant table, appending a (key, value, -1.0)
        row for every entry in the JSON-encoded ticket."""
        table = Table.from_arrays(self.cd.data, names=self.cd.names)
        ticket_payload = json.loads(ticket.ticket.decode('utf-8'))
        for key, value in ticket_payload.items():
            extra_row = Table.from_arrays(
                [array([key]), array([value]), array([-1.0])],
                names=self.cd.names)
            table = concat_tables([table, extra_row])
        return RecordBatchStream(table, options=self.options)
104 |
105 |
class ConstantFlightServerDelayed(ConstantFlightServer):
    """ConstantFlightServer that sleeps before answering do_get — useful for
    exercising client-side timeout behavior."""

    def __init__(self, location=None, options=None, delay=0.5, **kwargs):
        # Forward `options` to the parent. Previously it was accepted here but
        # never passed on, so the parent always built RecordBatchStream with
        # options=None regardless of what the caller supplied.
        super().__init__(location, options=options, **kwargs)
        self.delay = delay

    def do_get(self, context, ticket):
        """Delay by self.delay seconds, then delegate to the parent."""
        time.sleep(self.delay)
        return super().do_get(context, ticket)
115 |
116 |
class HeaderCheckServerMiddlewareFactory(ServerMiddlewareFactory):
    """Factory that validates the Authorization header on each call and
    records all request headers for later inspection."""

    def start_call(self, info, headers):
        """Reject non-Bearer auth, capture the headers, and hand the token
        to a HeaderCheckServerMiddleware instance."""
        auth_header = case_insensitive_header_lookup(headers, "Authorization")
        scheme_and_token = auth_header[0].split(' ')
        if scheme_and_token[0] != 'Bearer':
            raise FlightUnauthenticatedError("Token required")
        global req_headers
        req_headers = headers
        return HeaderCheckServerMiddleware(scheme_and_token[1])
127 |
128 |
class HeaderCheckServerMiddleware(ServerMiddleware):
    """
    Middleware needed to catch request headers via factory.

    N.B. As found in pyarrow tests
    """

    def __init__(self, token, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Token extracted from the incoming Authorization header.
        self.token = token

    def sending_headers(self):
        """Echo the captured token back to the client as a Bearer header."""
        return {'authorization': f'Bearer {self.token}'}
140 |
141 |
142 | class HeaderCheckFlightServer(FlightServerBase):
143 | """Mock server handle gRPC do_get calls"""
144 | def do_get(self, context, ticket):
145 | """Return something to avoid needless errors"""
146 | data = [
147 | array([b"Vltava", struct.pack('