├── .github
│   ├── FUNDING.yml
│   ├── release-draft-template.yaml
│   ├── renovate.json5
│   └── workflows
│       ├── docs_publish.yml
│       ├── nightly_testing.yml
│       ├── pypi_publish.yml
│       ├── release-drafter.yml
│       └── testing.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── assets
│   ├── add_in_batches.png
│   └── searches.png
├── benchmark
│   └── run_benchmark.py
├── codecov.yml
├── datasets
│   └── small_movies.json
├── docker-compose.https.yml
├── docker-compose.yml
├── docs
│   ├── .nojekyll
│   ├── CNAME
│   ├── async_client_api.md
│   ├── async_index_api.md
│   ├── client_api.md
│   ├── css
│   │   └── custom.css
│   ├── decorators_api.md
│   ├── index.md
│   ├── index_api.md
│   ├── js
│   │   └── umami.js
│   ├── json_handler.md
│   ├── plugins.md
│   └── pydantic.md
├── examples
│   ├── .gitignore
│   ├── README.md
│   ├── __init__.py
│   ├── add_documents_decorator.py
│   ├── add_documents_in_batches.py
│   ├── async_add_documents_decorator.py
│   ├── async_add_documents_in_batches.py
│   ├── async_documents_and_search_results.py
│   ├── async_search_tracker.py
│   ├── async_update_settings.py
│   ├── documents_and_search_results.py
│   ├── fastapi_example.py
│   ├── orjson_example.py
│   ├── pyproject.toml
│   ├── requirements.txt
│   ├── search_tracker.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_async_examples.py
│   │   └── test_examples.py
│   ├── ujson_example.py
│   └── update_settings.py
├── justfile
├── meilisearch_python_sdk
│   ├── __init__.py
│   ├── _batch.py
│   ├── _client.py
│   ├── _http_requests.py
│   ├── _task.py
│   ├── _utils.py
│   ├── _version.py
│   ├── decorators.py
│   ├── errors.py
│   ├── index.py
│   ├── json_handler.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── batch.py
│   │   ├── client.py
│   │   ├── documents.py
│   │   ├── health.py
│   │   ├── index.py
│   │   ├── search.py
│   │   ├── settings.py
│   │   ├── task.py
│   │   └── version.py
│   ├── plugins.py
│   ├── py.typed
│   └── types.py
├── mkdocs.yaml
├── pyproject.toml
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── test_async_client.py
│   ├── test_async_documents.py
│   ├── test_async_index.py
│   ├── test_async_index_plugins.py
│   ├── test_async_search.py
│   ├── test_client.py
│   ├── test_decorators.py
│   ├── test_documents.py
│   ├── test_errors.py
│   ├── test_index.py
│   ├── test_index_plugins.py
│   ├── test_search.py
│   ├── test_utils.py
│   └── test_version.py
└── uv.lock
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [sanders41]
2 |
--------------------------------------------------------------------------------
/.github/release-draft-template.yaml:
--------------------------------------------------------------------------------
1 | name-template: "v$RESOLVED_VERSION"
2 | tag-template: "v$RESOLVED_VERSION"
3 | exclude-labels:
4 | - "dependencies"
5 | - "skip-changelog"
6 | version-resolver:
7 | major:
8 | labels:
9 | - "breaking-change"
10 | minor:
11 | labels:
12 | - "enhancement"
13 | - "feature"
14 | default: patch
15 | categories:
16 | - title: "⚠️ Breaking changes"
17 | label: "breaking-change"
18 | - title: "Features"
19 | labels:
20 | - "feature"
21 | - "enhancement"
22 | - title: "Bug Fixes"
23 | labels: "bug"
24 | change-template: "- $TITLE @$AUTHOR (#$NUMBER)"
25 | template: |
26 | ## Changes
27 |
28 | $CHANGES
29 |
--------------------------------------------------------------------------------
/.github/renovate.json5:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "config:recommended",
5 | ":disableDependencyDashboard"
6 | ],
7 | lockFileMaintenance: {
8 | enabled: true,
9 | },
10 | "labels": ["dependencies", "skip-changelog"],
11 | }
12 |
--------------------------------------------------------------------------------
/.github/workflows/docs_publish.yml:
--------------------------------------------------------------------------------
1 | name: Docs Publish
2 | on:
3 | release:
4 | types:
5 | - published
6 | workflow_dispatch:
7 | env:
8 | PYTHON_VERSION: "3.12"
9 | jobs:
10 | deploy:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - name: Install uv
15 | uses: astral-sh/setup-uv@v6
16 | with:
17 | enable-cache: true
18 | - name: Set up Python
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: ${{ env.PYTHON_VERSION }}
22 | - name: Install Dependencies
23 | run: uv sync --frozen --all-extras
24 | - name: Deploy Docs
25 | run: uv run mkdocs gh-deploy --force
26 |
--------------------------------------------------------------------------------
/.github/workflows/nightly_testing.yml:
--------------------------------------------------------------------------------
1 | name: Nightly Testing
2 |
3 | on:
4 | schedule:
5 | # Set with UTC time
6 | - cron: "0 5 * * *"
7 | env:
8 | PYTHON_VERSION: "3.12"
9 | jobs:
10 | random-test-order:
11 | strategy:
12 | fail-fast: false
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v4
16 | - name: Install uv
17 | uses: astral-sh/setup-uv@v6
18 | with:
19 | enable-cache: true
20 | - name: install Just
21 | uses: taiki-e/install-action@just
22 | - name: Set up Python
23 | uses: actions/setup-python@v5
24 | with:
25 | python-version: ${{ env.PYTHON_VERSION }}
26 | - name: Install Dependencies
27 | run: |
28 | just install
29 | uv pip install pytest-randomly
30 | - name: Test with pytest in random order
31 | run: just test-ci
32 |
--------------------------------------------------------------------------------
/.github/workflows/pypi_publish.yml:
--------------------------------------------------------------------------------
1 | name: PyPi Publish
2 | on:
3 | release:
4 | types:
5 | - published
6 | env:
7 | PYTHON_VERSION: "3.12"
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | permissions:
12 | # For PyPI's trusted publishing.
13 | id-token: write
14 | steps:
15 | - uses: actions/checkout@v4
16 | - name: Install uv
17 | uses: astral-sh/setup-uv@v6
18 | with:
19 | enable-cache: true
20 | - name: Set up Python
21 | uses: actions/setup-python@v5
22 | with:
23 | python-version: ${{ env.PYTHON_VERSION }}
24 | - name: Install Dependencies
25 | run: uv sync --frozen --all-extras
26 | - name: Build package
27 | run: uv build
28 | - name: Publish package
29 | run: uv publish
30 |
--------------------------------------------------------------------------------
/.github/workflows/release-drafter.yml:
--------------------------------------------------------------------------------
1 | name: Release Drafter
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | update_release_draft:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: release-drafter/release-drafter@v6
13 | with:
14 | config-name: release-draft-template.yaml
15 | env:
16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
17 |
--------------------------------------------------------------------------------
/.github/workflows/testing.yml:
--------------------------------------------------------------------------------
1 | name: Testing
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | env:
9 | PYTHON_VERSION: "3.9"
10 | jobs:
11 | linting:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v4
15 | - name: install Just
16 | uses: taiki-e/install-action@just
17 | - name: Install uv
18 | uses: astral-sh/setup-uv@v6
19 | with:
20 | enable-cache: true
21 | - name: Set up Python
22 | uses: actions/setup-python@v5
23 | with:
24 | python-version: ${{ env.PYTHON_VERSION }}
25 | - name: Install Dependencies
26 | run: just install
27 | - name: mypy check
28 | run: just mypy
29 |
30 | parallel-testing:
31 | strategy:
32 | fail-fast: false
33 | matrix:
34 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
35 | runs-on: ubuntu-latest
36 | steps:
37 | - uses: actions/checkout@v4
38 | - name: install Just
39 | uses: taiki-e/install-action@just
40 | - name: Install uv
41 | uses: astral-sh/setup-uv@v6
42 | with:
43 | enable-cache: true
44 | - name: Set up Python ${{ matrix.python-version }}
45 | uses: actions/setup-python@v5
46 | with:
47 | python-version: ${{ matrix.python-version }}
48 | - name: Install Dependencies
49 | run: just install
50 | - name: Test with pytest
51 | run: just test-parallel-ci
52 | - name: Upload coverage
53 | uses: codecov/codecov-action@v5.4.3
54 | with:
55 | token: ${{ secrets.CODECOV_TOKEN }}
56 | fail_ci_if_error: true
57 |
58 | parallel-testing-http2:
59 | strategy:
60 | fail-fast: false
61 | matrix:
62 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
63 | runs-on: ubuntu-latest
64 | steps:
65 | - uses: actions/checkout@v4
66 | - name: install Just
67 | uses: taiki-e/install-action@just
68 | - name: Install uv
69 | uses: astral-sh/setup-uv@v6
70 | with:
71 | enable-cache: true
72 | - name: Set up Python ${{ matrix.python-version }}
73 | uses: actions/setup-python@v5
74 | with:
75 | python-version: ${{ matrix.python-version }}
76 | - name: Install Dependencies
77 | run: just install
78 | - name: Install truststore
79 | if: ${{ ! startsWith(matrix.python-version, '3.9') }}
80 | run: uv pip install truststore
81 | - name: Install ssl requirements
82 | run: |
83 | sudo apt-get update
84 | sudo apt-get install -y libnss3-tools build-essential gcc
85 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
86 | eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
87 | brew install mkcert
88 | mkcert -install
89 | mkcert -key-file meilisearch.key -cert-file meilisearch.crt localhost 127.0.0.1 ::1
90 | - name: Test with pytest
91 | run: just test-parallel-ci-http2
92 | - name: Upload coverage
93 | uses: codecov/codecov-action@v5.4.3
94 | with:
95 | token: ${{ secrets.CODECOV_TOKEN }}
96 | fail_ci_if_error: true
97 |
98 | no-parallel-testing:
99 | strategy:
100 | fail-fast: false
101 | matrix:
102 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
103 | runs-on: ubuntu-latest
104 | steps:
105 | - uses: actions/checkout@v4
106 | - name: install Just
107 | uses: taiki-e/install-action@just
108 | - name: Install uv
109 | uses: astral-sh/setup-uv@v6
110 | with:
111 | enable-cache: true
112 | - name: Set up Python ${{ matrix.python-version }}
113 | uses: actions/setup-python@v5
114 | with:
115 | python-version: ${{ matrix.python-version }}
116 | - name: Install Dependencies
117 | run: just install
118 | - name: Test with pytest
119 | run: just test-no-parallel-ci
120 | - name: Upload coverage
121 | uses: codecov/codecov-action@v5.4.3
122 | with:
123 | token: ${{ secrets.CODECOV_TOKEN }}
124 | fail_ci_if_error: true
125 |
126 | no-parallel-testing-http2:
127 | strategy:
128 | fail-fast: false
129 | matrix:
130 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
131 | runs-on: ubuntu-latest
132 | steps:
133 | - uses: actions/checkout@v4
134 | - name: install Just
135 | uses: taiki-e/install-action@just
136 | - name: Install uv
137 | uses: astral-sh/setup-uv@v6
138 | with:
139 | enable-cache: true
140 | - name: Set up Python ${{ matrix.python-version }}
141 | uses: actions/setup-python@v5
142 | with:
143 | python-version: ${{ matrix.python-version }}
144 | - name: Install Dependencies
145 | run: just install
146 | - name: Install truststore
147 | if: ${{ ! startsWith(matrix.python-version, '3.9') }}
148 | run: uv pip install truststore
149 | - name: Install ssl requirements
150 | run: |
151 | sudo apt-get update
152 | sudo apt-get install -y libnss3-tools build-essential gcc
153 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
154 | eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
155 | brew install mkcert
156 | mkcert -install
157 | mkcert -key-file meilisearch.key -cert-file meilisearch.crt localhost 127.0.0.1 ::1
158 | - name: Test with pytest
159 | run: just test-no-parallel-ci-http2
160 | - name: Upload coverage
161 | uses: codecov/codecov-action@v5.4.3
162 | with:
163 | token: ${{ secrets.CODECOV_TOKEN }}
164 | fail_ci_if_error: true
165 | example-testing:
166 | strategy:
167 | fail-fast: false
168 | matrix:
169 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
170 | runs-on: ubuntu-latest
171 | steps:
172 | - uses: actions/checkout@v4
173 | - name: install Just
174 | uses: taiki-e/install-action@just
175 | - name: Set up Python ${{ matrix.python-version }}
176 | uses: actions/setup-python@v5
177 | with:
178 | python-version: ${{ matrix.python-version }}
179 | cache: pip
180 | - name: Test with pytest
181 | run: just test-examples-ci
182 |
183 | docs:
184 | runs-on: ubuntu-latest
185 | steps:
186 | - uses: actions/checkout@v4
187 | - name: install Just
188 | uses: taiki-e/install-action@just
189 | - name: Install uv
190 | uses: astral-sh/setup-uv@v6
191 | with:
192 | enable-cache: true
193 | - name: Set up Python
194 | uses: actions/setup-python@v5
195 | with:
196 | python-version: "3.13"
197 | - name: Install Dependencies
198 | run: just install
199 | - name: Test Docs Build
200 | run: just build-docs
201 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | meilisearch.crt
2 | meilisearch.key
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # OS Files
9 | *.swp
10 | *.DS_Store
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | eggs/
22 | .eggs/
23 | lib/
24 | lib64/
25 | parts/
26 | sdist/
27 | var/
28 | wheels/
29 | pip-wheel-metadata/
30 | share/python-wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | MANIFEST
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .nox/
50 | .coverage
51 | .coverage.*
52 | .cache
53 | nosetests.xml
54 | coverage.xml
55 | *.cover
56 | *.py,cover
57 | .hypothesis/
58 | .pytest_cache/
59 |
60 | # Translations
61 | *.mo
62 | *.pot
63 |
64 | # Django stuff:
65 | *.log
66 | local_settings.py
67 | db.sqlite3
68 | db.sqlite3-journal
69 |
70 | # Flask stuff:
71 | instance/
72 | .webassets-cache
73 |
74 | # Scrapy stuff:
75 | .scrapy
76 |
77 | # Sphinx documentation
78 | docs/_build/
79 |
80 | # PyBuilder
81 | target/
82 |
83 | # Jupyter Notebook
84 | .ipynb_checkpoints
85 |
86 | # IPython
87 | profile_default/
88 | ipython_config.py
89 |
90 | # pyenv
91 | .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
101 | __pypackages__/
102 |
103 | # Celery stuff
104 | celerybeat-schedule
105 | celerybeat.pid
106 |
107 | # SageMath parsed files
108 | *.sage.py
109 |
110 | # Environments
111 | .env
112 | .venv
113 | env/
114 | venv/
115 | ENV/
116 | env.bak/
117 | venv.bak/
118 |
119 | # Spyder project settings
120 | .spyderproject
121 | .spyproject
122 |
123 | # Rope project settings
124 | .ropeproject
125 |
126 | # mkdocs documentation
127 | /site
128 |
129 | # mypy
130 | .mypy_cache/
131 | .dmypy.json
132 | dmypy.json
133 |
134 | # Pyre type checker
135 | .pyre/
136 |
137 | # editors
138 | .idea
139 | .vscode
140 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v5.0.0
4 | hooks:
5 | - id: check-added-large-files
6 | - id: check-toml
7 | - id: check-yaml
8 | - id: debug-statements
9 | - id: end-of-file-fixer
10 | - id: trailing-whitespace
11 | - repo: https://github.com/pre-commit/mirrors-mypy
12 | rev: v1.16.0
13 | hooks:
14 | - id: mypy
15 | additional_dependencies: [pydantic, orjson, types-aiofiles, types-ujson]
16 | - repo: https://github.com/astral-sh/ruff-pre-commit
17 | rev: v0.11.12
18 | hooks:
19 | - id: ruff-check
20 | args: [--fix, --exit-non-zero-on-fix]
21 | - id: ruff-format
22 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | ## Where to start
4 |
5 | All contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas are
6 | welcome.
7 |
8 | The best place to start is to check the
9 | [issues](https://github.com/sanders41/meilisearch-python-sdk/issues) for something that interests you.
10 |
11 | ## Bug Reports
12 |
13 | Please include:
14 |
15 | 1. A short, self-contained Python snippet reproducing the problem. You can format the code by using
16 | [GitHub markdown](https://docs.github.com/en/free-pro-team@latest/github/writing-on-github).
17 | For example:
18 |
19 | ```py
20 | from meilisearch_python_sdk import Client
21 |
22 | async with Client(BASE_URL, MASTER_KEY) as client:
23 | client.index("movies")
24 | ...
25 | ```
26 |
27 | 2. Explain what is currently happening and what you expect instead.
28 |
29 | ## Working on the code
30 |
31 | ### Fork the project
32 |
33 | In order to work on the project you will need your own fork. To do this click the "Fork" button on
34 | this project.
35 |
36 | Once the project is forked clone it to your local machine:
37 |
38 | ```sh
39 | git clone https://github.com/your-user-name/meilisearch-python-sdk.git
40 | cd meilisearch-python-sdk
41 | git remote add upstream https://github.com/sanders41/meilisearch-python-sdk.git
42 | ```
43 |
44 | This creates the directory meilisearch-python-sdk and connects your repository to the upstream
45 | (main project) repository.
46 |
47 | ### Working with the code
48 |
49 | Note: This project uses uv to manage dependencies. If you do not already have uv installed you will
50 | need to install it following the instructions [here](https://docs.astral.sh/uv/getting-started/installation/).
51 |
52 | First the requirements need to be installed.
53 |
54 | ```sh
55 | uv sync --frozen --all-extras
56 | ```
57 |
58 | ### Creating a branch
59 |
60 | You want your main branch to reflect only production-ready code, so create a feature branch for
61 | making your changes. For example:
62 |
63 | ```sh
64 | git checkout -b my-new-feature
65 | ```
66 |
67 | This changes your working directory to the my-new-feature branch. Keep any changes in this branch
68 | specific to one bug or feature so the purpose is clear. You can have many feature branches and switch
69 | between them using the git checkout command.
70 |
71 | When creating this branch, make sure your main branch is up to date with the latest upstream
72 | main version. To update your local main branch, you can do:
73 |
74 | ```sh
75 | git checkout main
76 | git pull upstream main --ff-only
77 | ```
78 |
79 | ### Code Standards and tests (ruff, mypy, pytest, and pre-commit)
80 |
81 | meilisearch-python-sdk uses [ruff](https://github.com/charliermarsh/ruff) and
82 | [mypy](https://mypy.readthedocs.io/en/stable/) to ensure consistent code formatting and type checking.
83 |
84 | You can run linting on your code at any time with:
85 |
86 | ```sh
87 | # Run ruff formatting
88 | uv run ruff format meilisearch_python_sdk tests
89 |
90 | # Run ruff linting
91 | uv run ruff check .
92 |
93 | # Run mypy
94 | uv run mypy meilisearch_python_sdk
95 | ```
96 |
97 | It is also suggested that you set up [pre-commit](https://pre-commit.com/) in order to run linting
98 | when you commit changes to your branch. To set up pre-commit for this project run:
99 |
100 | ```sh
101 | pre-commit install
102 | ```
103 |
104 | After this, pre-commit will automatically run any time you commit code to your branches. You can
105 | also run pre-commit at any time with:
106 |
107 | ```sh
108 | pre-commit run --all-files
109 | ```
110 |
111 | ### Type Hints
112 |
113 | At a minimum all variables/arguments that receive data should contain type hints, and all
114 | functions/methods should specify the return type.
115 |
116 | Accepted examples:
117 |
118 | ```py
119 | def my_function(argument: str) -> None:
120 | ...
121 |
122 |
123 | def another_function(num: int) -> int:
124 | return num + 1
125 | ```
126 |
127 | Rejected examples:
128 |
129 | ```py
130 | def my_function(argument):
131 | ...
132 |
133 |
134 | def another_function(num):
135 | return num + 1
136 | ```
137 |
138 | Type hints on files in the tests directory are optional.
139 |
140 | ### Testing
141 |
142 | This project uses [pytest](https://docs.pytest.org/en/stable/) for testing. Please ensure that any
143 | additions/changes you make to the code have tests to go along with them. Code coverage should not
144 | drop below its current level with any pull request you make; if it does, the pull request will not
145 | be accepted. You can view the current coverage level in the codecov badge on the
146 | [main GitHub page](https://github.com/sanders41/meilisearch-python-sdk), and you can run the tests
147 | locally to see the code coverage.
148 |
149 | There are multiple ways Meilisearch can be set up for testing. The examples below use Docker, however
150 | installing and starting Meilisearch with any [installation](https://docs.meilisearch.com/learn/getting_started/quick_start.html#setup-and-installation)
151 | method will work.
152 |
153 | Before running the tests, start a Docker container running Meilisearch, or start it with the
154 | appropriate method for how you installed Meilisearch if not using Docker.
155 |
156 | ```sh
157 | docker pull getmeili/meilisearch:latest
158 | docker run -p 7700:7700 getmeili/meilisearch:latest meilisearch --master-key=masterKey --no-analytics
159 | ```
160 |
161 | Now, with the container running, run the test suite:
162 |
163 | ```sh
164 | uv run pytest
165 | ```
166 |
167 | In addition to maintaining the coverage percentage, please ensure that all
168 | tests are passing before submitting a pull request.
169 |
170 | #### just
171 |
172 | If you have [just](https://github.com/casey/just) installed it can be used for testing and linting.
173 |
174 | To run linting:
175 |
176 | ```sh
177 | just lint
178 | ```
179 |
180 | Using just to run the tests will start Meilisearch in a Docker container, run the tests, then stop
181 | the container.
182 |
183 | ```sh
184 | just test
185 | ```
186 |
187 | To see a full list of `just` commands run `just --list`
188 |
189 | ## Docs
190 |
191 | Documentation is automatically generated based on the docstrings from the functions/methods. If
192 | functions/methods are added/removed, make sure to update the
193 | [api documentation page](https://github.com/sanders41/meilisearch-python-sdk/docs/api.md) accordingly.
194 |
195 | You can view any changes to the docs locally by running:
196 |
197 | ```sh
198 | mkdocs serve
199 | ```
200 |
201 | Building the docs can be tested by running:
202 |
203 | ```sh
204 | mkdocs build --strict
205 | ```
206 |
207 | ## Committing your code
208 |
209 | Once you have made changes to the code on your branch you can see which files have changed by running:
210 |
211 | ```sh
212 | git status
213 | ```
214 |
215 | If new files were created and are not yet tracked by git, they can be added by running:
216 |
217 | ```sh
218 | git add .
219 | ```
220 |
221 | Now you can commit your changes in your local repository:
222 |
223 | ```sh
224 | git commit -am 'Some short helpful message to describe your changes'
225 | ```
226 |
227 | If you set up pre-commit and any of the tests fail, the commit will be cancelled and you will need to
228 | fix any errors. Once the errors are fixed, you can run the same git commit command again.
229 |
230 | ## Push your changes
231 |
232 | Once your changes are ready and all linting/tests are passing you can push your changes to your
233 | forked repository:
234 |
235 | ```sh
236 | git push origin my-new-feature
237 | ```
238 |
239 | origin is the default name of your remote repository on GitHub. You can see all of your remote
240 | repositories by running:
241 |
242 | ```sh
243 | git remote -v
244 | ```
245 |
246 | ## Making a Pull Request
247 |
248 | After pushing your code to origin it is now on GitHub but not yet part of the
249 | meilisearch-python-sdk project. When you’re ready to ask for a code review, file a pull request.
250 | Before you do, once again make sure that you have followed all the guidelines outlined in this
251 | document regarding code style, tests, and documentation.
252 |
253 | ### Make the pull request
254 |
255 | If everything looks good, you are ready to make a pull request. This is how you let the maintainers
256 | of the meilisearch-python-sdk project know you have code ready to be reviewed. To submit the pull
257 | request:
258 |
259 | 1. Navigate to your repository on GitHub
260 | 2. Click on the Pull Request button for your feature branch
261 | 3. You can then click on Commits and Files Changed to make sure everything looks okay one last time
262 | 4. Write a description of your changes in the Conversation tab
263 | 5. Click Send Pull Request
264 |
265 | This request then goes to the repository maintainers, and they will review the code.
266 |
267 | ### Updating your pull request
268 |
269 | Changes to your code may be needed based on the review of your pull request. If this is the case you
270 | can make them in your branch, add a new commit to that branch, push it to GitHub, and the pull
271 | request will be automatically updated. Pushing them to GitHub again is done by:
272 |
273 | ```sh
274 | git push origin my-new-feature
275 | ```
276 |
277 | This will automatically update your pull request with the latest code and restart the Continuous
278 | Integration tests.
279 |
280 | Another reason you might need to update your pull request is to solve conflicts with changes that
281 | have been merged into the main branch since you opened your pull request.
282 |
283 | To do this, you need to rebase your branch:
284 |
285 | ```sh
286 | git checkout my-new-feature
287 | git fetch upstream
288 | git rebase upstream/main
289 | ```
290 |
291 | There may be some merge conflicts that need to be resolved. After the feature branch has been updated
292 | locally, you can update your pull request by pushing to the branch on GitHub:
293 |
294 | ```sh
295 | git push origin my-new-feature
296 | ```
297 |
298 | If you rebased and get an error when pushing your changes you can resolve it with:
299 |
300 | ```sh
301 | git push origin my-new-feature --force
302 | ```
303 |
304 | ## Delete your merged branch (optional)
305 |
306 | Once your feature branch is accepted into upstream, you’ll probably want to get rid of the branch.
307 | First, merge upstream main into your main branch so git knows it is safe to delete your branch:
308 |
309 | ```sh
310 | git fetch upstream
311 | git checkout main
312 | git merge upstream/main
313 | ```
314 |
315 | Then you can do:
316 |
317 | ```sh
318 | git branch -d my-new-feature
319 | ```
320 |
321 | Make sure you use a lower-case -d, or else git won’t warn you if your feature branch has not
322 | actually been merged.
323 |
324 | The branch will still exist on GitHub, so to delete it there do:
325 |
326 | ```sh
327 | git push origin --delete my-new-feature
328 | ```
329 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Paul Sanders
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Meilisearch Python SDK
2 |
3 | [](https://github.com/sanders41/meilisearch-python-sdk/actions?query=workflow%3ATesting+branch%3Amain+event%3Apush)
4 | [](https://results.pre-commit.ci/latest/github/sanders41/meilisearch-python-sdk/main)
5 | [](https://codecov.io/gh/sanders41/meilisearch-python-sdk)
6 | [](https://badge.fury.io/py/meilisearch-python-sdk)
7 | [](https://github.com/sanders41/meilisearch-python-sdk)
8 |
9 | Meilisearch Python SDK provides both an async and sync client for the
10 | [Meilisearch](https://github.com/meilisearch/meilisearch) API.
11 |
12 | Which client to use depends on your use case. If the code base you are working with uses asyncio,
13 | for example if you are using [FastAPI](https://fastapi.tiangolo.com/), choose the `AsyncClient`,
14 | otherwise choose the sync `Client`. The functionality of the two clients is the same, the difference
15 | being that the `AsyncClient` provides async methods and uses the `AsyncIndex` with its own
16 | additional async methods. On the other hand, `Client` provides blocking methods and uses the `Index`
17 | with its own blocking methods.
18 |
19 | ## Installation
20 |
21 | Using a virtual environment is recommended for installing this package. Once the virtual
22 | environment is created and activated, install the package with:
23 |
24 | ```sh
25 | pip install meilisearch-python-sdk
26 | ```
27 |
28 | ## Run Meilisearch
29 |
30 | There are several ways to
31 | [run Meilisearch](https://www.meilisearch.com/docs/learn/getting_started/installation).
32 | Pick the one that works best for your use case and then start the server.
33 |
34 | As an example, to use Docker:
35 |
36 | ```sh
37 | docker pull getmeili/meilisearch:latest
38 | docker run -it --rm -p 7700:7700 getmeili/meilisearch:latest ./meilisearch --master-key=masterKey
39 | ```
40 |
41 | ## Usage
42 |
43 | ### Add Documents
44 |
45 | #### AsyncClient
46 |
47 | - Note: `client.index("books")` creates an instance of an `AsyncIndex` object but does not make a
48 |   network call to send the data yet, so it does not need to be awaited.
49 |
50 | ```py
51 | from meilisearch_python_sdk import AsyncClient
52 |
53 | async with AsyncClient('http://127.0.0.1:7700', 'masterKey') as client:
54 | index = client.index("books")
55 |
56 | documents = [
57 | {"id": 1, "title": "Ready Player One"},
58 | {"id": 42, "title": "The Hitchhiker's Guide to the Galaxy"},
59 | ]
60 |
61 | await index.add_documents(documents)
62 | ```
63 |
64 | #### Client
65 |
66 | ```py
67 | from meilisearch_python_sdk import Client
68 |
69 | client = Client('http://127.0.0.1:7700', 'masterKey')
70 | index = client.index("books")
71 |
72 | documents = [
73 | {"id": 1, "title": "Ready Player One"},
74 | {"id": 42, "title": "The Hitchhiker's Guide to the Galaxy"},
75 | ]
76 |
77 | index.add_documents(documents)
78 | ```
79 |
80 | The server will return a task id that can be used to
81 | [get the status](https://www.meilisearch.com/docs/reference/api/tasks#status)
82 | of the task. To do this, save the response from adding the documents to a
83 | variable (a `TaskInfo` object) and use it to check the status of the update.
84 |
85 | #### AsyncClient
86 |
87 | ```py
88 | task = await index.add_documents([{"id": 1, "title": "test"}])
89 | status = await client.get_task(task.task_uid)
90 | ```
91 |
92 | #### Client
93 |
94 | ```py
95 | task = index.add_documents([{"id": 1, "title": "test"}])
96 | status = client.get_task(task.task_uid)
97 | ```
98 |
99 | ### Basic Searching
100 |
101 | #### AsyncClient
102 |
103 | ```py
104 | search_result = await index.search("ready player")
105 | ```
106 |
107 | #### Client
108 |
109 | ```py
110 | search_result = index.search("ready player")
111 | ```
112 |
113 | ### Base Search Results: SearchResults object with values
114 |
115 | ```py
116 | SearchResults(
117 | hits = [
118 | {
119 | "id": 1,
120 | "title": "Ready Player One",
121 | },
122 | ],
123 | offset = 0,
124 | limit = 20,
125 | nb_hits = 1,
126 | exhaustive_nb_hits = bool,
127 |     facets_distribution = None,
128 | processing_time_ms = 1,
129 | query = "ready player",
130 | )
131 | ```
132 |
133 | ### Custom Search
134 |
135 | Information about the parameters can be found in the
136 | [search parameters](https://docs.meilisearch.com/reference/features/search_parameters.html) section
137 | of the documentation.
138 |
139 | #### AsyncClient
140 |
141 | ```py
142 | await index.search(
143 | "guide",
144 | attributes_to_highlight=["title"],
145 | filters="book_id > 10"
146 | )
147 | ```
148 |
149 | #### Client
150 |
151 | ```py
152 | index.search(
153 | "guide",
154 | attributes_to_highlight=["title"],
155 | filters="book_id > 10"
156 | )
157 | ```
158 |
159 | ### Custom Search Results: SearchResults object with values
160 |
161 | ```py
162 | SearchResults(
163 | hits = [
164 | {
165 | "id": 42,
166 | "title": "The Hitchhiker's Guide to the Galaxy",
167 | "_formatted": {
168 | "id": 42,
169 | "title": "The Hitchhiker's Guide to the Galaxy"
170 | }
171 | },
172 | ],
173 | offset = 0,
174 | limit = 20,
175 | nb_hits = 1,
176 | exhaustive_nb_hits = bool,
177 |     facets_distribution = None,
178 | processing_time_ms = 5,
179 |     query = "guide",
180 | )
181 | ```
182 |
183 | ## Benchmark
184 |
185 | The following benchmarks compare this library to the official
186 | [Meilisearch Python](https://github.com/meilisearch/meilisearch-python) library. Note that all
187 | of the performance gains seen with the `AsyncClient` are achieved by taking advantage of asyncio.
188 | This means that if your code is not taking advantage of asyncio, or if it blocks the event loop,
189 | the gains here will not be seen and the performance between the clients will be very similar.
190 |
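As an illustration of where the asyncio gains come from, here is a minimal sketch of running many
searches concurrently with the `AsyncClient`, mirroring what the search benchmark does (the URL,
master key, and index name are assumptions for a local test server):

```py
import asyncio

from meilisearch_python_sdk import AsyncClient


async def run_searches() -> None:
    async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
        index = client.index("books")
        # Build the search coroutines first, then let asyncio run them
        # concurrently instead of waiting on each request one at a time.
        searches = [index.search(query) for query in ["ready player", "guide"] * 500]
        results = await asyncio.gather(*searches)
        print(len(results))


asyncio.run(run_searches())
```
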
191 | ### Add Documents in Batches
192 |
193 | This test compares how long it takes to send 1 million documents in batches of 1 thousand to the
194 | Meilisearch server for indexing (lower is better). The time does not take into account how long
195 | Meilisearch takes to index the documents since that is outside of the library functionality.
196 |
197 | 
198 |
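For reference, a minimal sketch of the batched call being measured, using `add_documents_in_batches`
the same way the benchmark script does (the index name, document shape, and batch size here are
illustrative):

```py
from meilisearch_python_sdk import Client

client = Client("http://127.0.0.1:7700", "masterKey")
index = client.index("movies")

documents = [{"id": i, "title": f"Movie {i}"} for i in range(10_000)]

# Splits the documents into batches of 1000 and sends each batch to the
# server, returning one task per batch that can be waited on.
tasks = index.add_documents_in_batches(documents, batch_size=1000)
```
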
199 | ### Multiple Searches
200 |
201 | This test compares how long it takes to complete 1000 searches (lower is better).
202 |
203 | 
204 |
205 | ### Independent testing
206 |
207 | [Prashanth Rao](https://github.com/prrao87) did some independent testing and found this async client
208 | to be ~30% faster than the sync client for data ingestion. You can find a good write-up of the
209 | results and how he tested them in his [blog post](https://thedataquarry.com/posts/meilisearch-async/).
210 |
211 | ## Testing
212 |
213 | [pytest-meilisearch](https://github.com/sanders41/pytest-meilisearch) is a pytest plugin that can
214 | help with testing your code. It provides a lot of the boilerplate code you will need.
215 |
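For example, a test using the plugin might look something like the sketch below (the `client`
fixture name is an assumption; check the pytest-meilisearch README for the exact fixtures it
provides):

```py
def test_add_documents(client):
    # `client` is assumed to be a fixture provided by pytest-meilisearch that
    # yields a Client connected to a test Meilisearch instance.
    index = client.create_index("books")
    task = index.add_documents([{"id": 1, "title": "Ready Player One"}])
    result = client.wait_for_task(task.task_uid)
    assert result.status == "succeeded"
```
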
216 | ## Documentation
217 |
218 | See our [docs](https://meilisearch-python-sdk.paulsanders.dev) for the full documentation.
219 |
220 | ## Contributing
221 |
222 | Contributions to this project are welcome. If you are interested in contributing please see our
223 | [contributing guide](CONTRIBUTING.md)
224 |
--------------------------------------------------------------------------------
/assets/add_in_batches.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/assets/add_in_batches.png
--------------------------------------------------------------------------------
/assets/searches.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/assets/searches.png
--------------------------------------------------------------------------------
/benchmark/run_benchmark.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | import random
6 | from collections.abc import Sequence
7 | from copy import deepcopy
8 | from pathlib import Path
9 | from statistics import fmean
10 | from time import time
11 |
12 | from meilisearch import Client as MeilisearchClient
13 | from meilisearch.models.task import TaskInfo as MeiliTaskInfo
14 | from rich.console import Console
15 | from rich.progress import track
16 |
17 | from meilisearch_python_sdk import AsyncClient, Client
18 | from meilisearch_python_sdk.models.task import TaskInfo
19 | from meilisearch_python_sdk.types import JsonDict, JsonMapping
20 |
21 |
22 | def generate_data(add_records: int = 1000000) -> list[JsonDict]:
23 | """Generate data for running the benchmark.
24 |
25 |     Defaults to generating 1,000,000 documents based on small_movies.json.
26 | """
27 | small_movies = Path().absolute() / "datasets/small_movies.json"
28 | with open(small_movies) as f:
29 | data = json.load(f)
30 |
31 | updated = deepcopy(data)
32 |
33 | # Start at 10000 to not overlap any ids already in small_movies.json
34 | start = 10000
35 | end = (add_records - len(data)) + start
36 | max_record = len(data) - 1
37 | select = 0
38 | for i in track(range(start, end), description="Generating data..."):
39 | new = deepcopy(data[select])
40 | new["id"] = i
41 | updated.append(new)
42 |
43 | select += 1
44 | if select > max_record:
45 | select = 0
46 |
47 | return updated
48 |
49 |
50 | def create_search_samples() -> list[str]:
51 | """Generate a random sample of movie names for running the search benchmark.
52 |
53 | The samples are generated with repetition, as we have just 30 movies in the dataset.
54 | """
55 | small_movies = Path().absolute() / "datasets/small_movies.json"
56 | data = []
57 | with open(small_movies) as f:
58 | data = json.load(f)
59 |
60 | # We want to search on titles of movies
61 | movie_names = [movie["title"] for movie in data]
62 | # Also consider lower case movie names for variety
63 | movie_names_lower = [movie.lower() for movie in movie_names]
64 | # Sample from both lists with repetition
65 | movies_for_sampling = movie_names + movie_names_lower
66 | movies_sampled = random.choices(movies_for_sampling, k=1000)
67 | return movies_sampled
68 |
69 |
70 | async def benchmark_async_add_document_in_batches(
71 | client: AsyncClient, data: Sequence[JsonMapping]
72 | ) -> tuple[list[TaskInfo], float]:
73 | index = client.index("movies")
74 | start = time()
75 | tasks = await index.add_documents_in_batches(data, batch_size=1000)
76 | end = time()
77 |
78 | return tasks, (end - start)
79 |
80 |
81 | def benchmark_sync_add_document_in_batches(
82 | client: Client, data: Sequence[JsonMapping]
83 | ) -> tuple[list[TaskInfo], float]:
84 | index = client.index("movies")
85 | start = time()
86 | tasks = index.add_documents_in_batches(data, batch_size=1000)
87 | end = time()
88 |
89 | return tasks, (end - start)
90 |
91 |
92 | def benchmark_meili_add_documents_in_batches(
93 | client: MeilisearchClient, data: Sequence[JsonMapping]
94 | ) -> tuple[list[MeiliTaskInfo], float]:
95 | index = client.index("movies")
96 | start = time()
97 | tasks = index.add_documents_in_batches(data, batch_size=1000) # type: ignore
98 | end = time()
99 |
100 | return tasks, (end - start)
101 |
102 |
103 | async def run_async_batch_add_benchmark(data: Sequence[JsonMapping]) -> list[float]:
104 | times = []
105 | for _ in track(range(10), description="Running async add in batches benchmark..."):
106 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
107 | index = client.index("movies")
108 | _, time_taken = await benchmark_async_add_document_in_batches(client, data)
109 | times.append(time_taken)
110 | task = await client.cancel_tasks()
111 | await client.wait_for_task(task.task_uid, timeout_in_ms=None)
112 | task = await index.delete()
113 | await client.wait_for_task(task.task_uid, timeout_in_ms=None)
114 |
115 | return times
116 |
117 |
118 | async def run_async_search_benchmark(movies_sampled: list[str]) -> list[float]:
119 | times = []
120 | for _ in track(range(10), description="Running async multi search benchmark..."):
121 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
122 | index = client.index("movies")
123 | searches = []
124 | for movie in movies_sampled:
125 | searches.append(index.search(movie))
126 |
127 | start = time()
128 | await asyncio.gather(*searches)
129 | end = time()
130 | times.append(end - start)
131 |
132 | return times
133 |
134 |
135 | async def setup_index(data: Sequence[JsonMapping]) -> None:
136 | console = Console()
137 | with console.status("Preparing Meilisearch for tests..."):
138 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
139 | index = await client.create_index("movies")
140 | tasks = await index.add_documents_in_batches(data, batch_size=1000)
141 | waits = [client.wait_for_task(x.task_uid, timeout_in_ms=None) for x in tasks]
142 | await asyncio.gather(*waits)
143 |
144 |
145 | def run_sync_batch_add_benchmark(data: Sequence[JsonMapping]) -> list[float]:
146 | times = []
147 | for _ in track(range(10), description="Running sync add in batches benchmark..."):
148 | client = Client("http://127.0.0.1:7700", "masterKey")
149 | index = client.index("movies")
150 | _, time_taken = benchmark_sync_add_document_in_batches(client, data)
151 | times.append(time_taken)
152 |         task = client.cancel_tasks(statuses=["enqueued", "processing"])
153 | client.wait_for_task(task.task_uid, timeout_in_ms=600000)
154 | task = index.delete()
155 | client.wait_for_task(task.task_uid, timeout_in_ms=600000)
156 |
157 | return times
158 |
159 |
160 | def run_sync_search_benchmark(movies_sampled: list[str]) -> list[float]:
161 | client = Client("http://127.0.0.1:7700", "masterKey")
162 | index = client.index("movies")
163 | times = []
164 | for _ in track(range(10), description="Running sync multi search benchmark..."):
165 | start = time()
166 | for movie in movies_sampled:
167 | index.search(movie)
168 | end = time()
169 | times.append(end - start)
170 |
171 | return times
172 |
173 |
174 | def run_meili_batch_add_benchmark(data: Sequence[JsonMapping]) -> list[float]:
175 | times = []
176 | for _ in track(range(10), description="Running meili add in batches benchmark..."):
177 | client = MeilisearchClient("http://127.0.0.1:7700", "masterKey")
178 | index = client.index("movies")
179 | _, time_taken = benchmark_meili_add_documents_in_batches(client, data)
180 | times.append(time_taken)
181 | task = client.cancel_tasks({"statuses": "enqueued,processing"})
182 | client.wait_for_task(task.task_uid, timeout_in_ms=600000)
183 | task = index.delete()
184 | client.wait_for_task(task.task_uid, timeout_in_ms=600000)
185 |
186 | return times
187 |
188 |
189 | def run_meili_search_benchmark(movies_sampled: list[str]) -> list[float]:
190 | client = MeilisearchClient("http://127.0.0.1:7700", "masterKey")
191 | index = client.index("movies")
192 | times = []
193 | for _ in track(range(10), description="Running meili multi search benchmark..."):
194 | start = time()
195 | for movie in movies_sampled:
196 | index.search(movie)
197 | end = time()
198 | times.append(end - start)
199 |
200 | return times
201 |
202 |
203 | async def main() -> None:
204 | data = generate_data()
205 | async_add_batches = await run_async_batch_add_benchmark(data)
206 | sync_add_batches = run_sync_batch_add_benchmark(data)
207 | meili_add_batches = run_meili_batch_add_benchmark(data)
208 |
209 | async_add_batches_mean = fmean(async_add_batches)
210 | sync_add_batches_mean = fmean(sync_add_batches)
211 | meili_add_batches_mean = fmean(meili_add_batches)
212 |
213 | print(async_add_batches_mean) # noqa: T201
214 | print(sync_add_batches_mean) # noqa: T201
215 | print(meili_add_batches_mean) # noqa: T201
216 |
217 | await setup_index(data)
218 | movies_sampled = create_search_samples()
219 | async_search = await run_async_search_benchmark(movies_sampled)
220 | sync_search = run_sync_search_benchmark(movies_sampled)
221 |     meili_search = run_meili_search_benchmark(movies_sampled)
222 |
223 | async_search_mean = fmean(async_search)
224 | sync_search_mean = fmean(sync_search)
225 | meili_search_mean = fmean(meili_search)
226 |
227 | print(async_search_mean) # noqa: T201
228 | print(sync_search_mean) # noqa: T201
229 | print(meili_search_mean) # noqa: T201
230 |
231 |
232 | if __name__ == "__main__":
233 | # Set the seed to 0 so that the same data is generated each time
234 | random.seed(0)
235 |
236 | asyncio.run(main())
237 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | codecov:
2 | notify:
3 | after_n_builds: 10
4 |
5 | comment:
6 | after_n_builds: 10
7 |
--------------------------------------------------------------------------------
/datasets/small_movies.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "id": "287947",
4 | "title": "Shazam!",
5 | "poster": "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
6 | "overview": "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
7 | "release_date": 1553299200,
8 | "genre": "action"
9 | },
10 | {
11 | "id": "299537",
12 | "title": "Captain Marvel",
13 | "poster": "https://image.tmdb.org/t/p/w1280/AtsgWhDnHTq68L0lLsUrCnM7TjG.jpg",
14 | "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.",
15 | "release_date": 1551830400,
16 | "genre": "action"
17 | },
18 | {
19 | "id": "522681",
20 | "title": "Escape Room",
21 | "poster": "https://image.tmdb.org/t/p/w1280/8yZAx7tlKRZIg7pJfaPhl00yHIQ.jpg",
22 | "overview": "Six strangers find themselves in circumstances beyond their control, and must use their wits to survive.",
23 | "release_date": 1546473600
24 | },
25 | {
26 | "id": "166428",
27 | "title": "How to Train Your Dragon: The Hidden World",
28 | "poster": "https://image.tmdb.org/t/p/w1280/xvx4Yhf0DVH8G4LzNISpMfFBDy2.jpg",
29 | "overview": "As Hiccup fulfills his dream of creating a peaceful dragon utopia, Toothless’ discovery of an untamed, elusive mate draws the Night Fury away. When danger mounts at home and Hiccup’s reign as village chief is tested, both dragon and rider must make impossible decisions to save their kind.",
30 | "release_date": 1546473600,
31 | "genre": "cartoon"
32 | },
33 | {
34 | "id": "450465",
35 | "title": "Glass",
36 | "poster": "https://image.tmdb.org/t/p/w1280/svIDTNUoajS8dLEo7EosxvyAsgJ.jpg",
37 | "overview": "In a series of escalating encounters, security guard David Dunn uses his supernatural abilities to track Kevin Wendell Crumb, a disturbed man who has twenty-four personalities. Meanwhile, the shadowy presence of Elijah Price emerges as an orchestrator who holds secrets critical to both men.",
38 | "release_date": 1547596800
39 | },
40 | {
41 | "id": "495925",
42 | "title": "Doraemon the Movie: Nobita's Treasure Island",
43 | "poster": "https://image.tmdb.org/t/p/w1280/cmJ71gdZxCqkMUvGwWgSg3MK7pC.jpg",
44 | "overview": "The story is based on Robert Louis Stevenson's Treasure Island novel.",
45 | "release_date": 1520035200
46 | },
47 | {
48 | "id": "329996",
49 | "title": "Dumbo",
50 | "poster": "https://image.tmdb.org/t/p/w1280/279PwJAcelI4VuBtdzrZASqDPQr.jpg",
51 | "overview": "A young elephant, whose oversized ears enable him to fly, helps save a struggling circus, but when the circus plans a new venture, Dumbo and his friends discover dark secrets beneath its shiny veneer.",
52 | "release_date": 1553644800,
53 | "genre": "cartoon"
54 | },
55 | {
56 | "id": "299536",
57 | "title": "Avengers: Infinity War",
58 | "poster": "https://image.tmdb.org/t/p/w1280/7WsyChQLEftFiDOVTGkv3hFpyyt.jpg",
59 | "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.",
60 | "release_date": 1524618000,
61 | "genre": "action"
62 | },
63 | {
64 | "id": "458723",
65 | "title": "Us",
66 | "poster": "https://image.tmdb.org/t/p/w1280/ux2dU1jQ2ACIMShzB3yP93Udpzc.jpg",
67 | "overview": "Husband and wife Gabe and Adelaide Wilson take their kids to their beach house expecting to unplug and unwind with friends. But as night descends, their serenity turns to tension and chaos when some shocking visitors arrive uninvited.",
68 | "release_date": 1552521600
69 | },
70 | {
71 | "id": "424783",
72 | "title": "Bumblebee",
73 | "poster": "https://image.tmdb.org/t/p/w1280/fw02ONlDhrYjTSZV8XO6hhU3ds3.jpg",
74 | "overview": "On the run in the year 1987, Bumblebee finds refuge in a junkyard in a small Californian beach town. Charlie, on the cusp of turning 18 and trying to find her place in the world, discovers Bumblebee, battle-scarred and broken. When Charlie revives him, she quickly learns this is no ordinary yellow VW bug.",
75 | "release_date": 1544832000
76 | },
77 | {
78 | "id": "920",
79 | "title": "Cars",
80 | "poster": "https://image.tmdb.org/t/p/w1280/5damnMcRFKSjhCirgX3CMa88MBj.jpg",
81 | "overview": "Lightning McQueen, a hotshot rookie race car driven to succeed, discovers that life is about the journey, not the finish line, when he finds himself unexpectedly detoured in the sleepy Route 66 town of Radiator Springs. On route across the country to the big Piston Cup Championship in California to compete against two seasoned pros, McQueen gets to know the town's offbeat characters.",
82 | "release_date": 1149728400,
83 | "genre": "cartoon"
84 | },
85 | {
86 | "id": "299534",
87 | "title": "Avengers: Endgame",
88 | "poster": "https://image.tmdb.org/t/p/w1280/dHjLaIUHXcMBt7YxK1TKWK1end9.jpg",
89 | "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.",
90 | "release_date": 1556067600,
91 | "genre": "action"
92 | },
93 | {
94 | "id": "324857",
95 | "title": "Spider-Man: Into the Spider-Verse",
96 | "poster": "https://image.tmdb.org/t/p/w1280/iiZZdoQBEYBv6id8su7ImL0oCbD.jpg",
97 | "overview": "Miles Morales is juggling his life between being a high school student and being a spider-man. When Wilson 'Kingpin' Fisk uses a super collider, others from across the Spider-Verse are transported to this dimension.",
98 | "release_date": 1544140800,
99 | "genre": "action"
100 | },
101 | {
102 | "id": "157433",
103 | "title": "Pet Sematary",
104 | "poster": "https://image.tmdb.org/t/p/w1280/7SPhr7Qj39vbnfF9O2qHRYaKHAL.jpg",
105 | "overview": "Louis Creed, his wife Rachel and their two children Gage and Ellie move to a rural home where they are welcomed and enlightened about the eerie 'Pet Sematary' located nearby. After the tragedy of their cat being killed by a truck, Louis resorts to burying it in the mysterious pet cemetery, which is definitely not as it seems, as it proves to the Creeds that sometimes dead is better.",
106 | "release_date": 1554339600
107 | },
108 | {
109 | "id": "456740",
110 | "title": "Hellboy",
111 | "poster": "https://image.tmdb.org/t/p/w1280/nUXCJMnAiwCpNPZuJH2n6h5hGtF.jpg",
112 | "overview": "Hellboy comes to England, where he must defeat Nimue, Merlin's consort and the Blood Queen. But their battle will bring about the end of the world, a fate he desperately tries to turn away.",
113 | "release_date": 1554944400
114 | },
115 | {
116 | "id": "537915",
117 | "title": "After",
118 | "poster": "https://image.tmdb.org/t/p/w1280/u3B2YKUjWABcxXZ6Nm9h10hLUbh.jpg",
119 | "overview": "A young woman falls for a guy with a dark secret and the two embark on a rocky relationship.",
120 | "release_date": 1554944400
121 | },
122 | {
123 | "id": "485811",
124 | "title": "Redcon-1",
125 | "poster": "https://image.tmdb.org/t/p/w1280/vVPrWngVJ2cfYAncBedQty69Dlf.jpg",
126 | "overview": "After a zombie apocalypse spreads from a London prison, the UK is brought to its knees. The spread of the virus is temporarily contained but, without a cure, it’s only a matter of time before it breaks its boundaries and the biggest problem of all… any zombies with combat skills are now enhanced. With the South East of England quarantined from the rest of the world using fortified borders, intelligence finds that the scientist responsible for the outbreak is alive and well in London. With his recovery being the only hope of a cure, a squad of eight Special Forces soldiers is sent on a suicide mission to the city, now ruled by the undead, with a single task: get him out alive within 72 hours by any means necessary. What emerges is an unlikely pairing on a course to save humanity against ever-rising odds.",
127 | "release_date": 1538096400
128 | },
129 | {
130 | "id": "471507",
131 | "title": "Destroyer",
132 | "poster": "https://image.tmdb.org/t/p/w1280/sHw9gTdo43nJL82py0oaROkXXNr.jpg",
133 | "overview": "Erin Bell is an LAPD detective who, as a young cop, was placed undercover with a gang in the California desert with tragic results. When the leader of that gang re-emerges many years later, she must work her way back through the remaining members and into her own history with them to finally reckon with the demons that destroyed her past.",
134 | "release_date": 1545696000
135 | },
136 | {
137 | "id": "400650",
138 | "title": "Mary Poppins Returns",
139 | "poster": "https://image.tmdb.org/t/p/w1280/uTVGku4LibMGyKgQvjBtv3OYfAX.jpg",
140 | "overview": "In Depression-era London, a now-grown Jane and Michael Banks, along with Michael's three children, are visited by the enigmatic Mary Poppins following a personal loss. Through her unique magical skills, and with the aid of her friend Jack, she helps the family rediscover the joy and wonder missing in their lives.",
141 | "release_date": 1544659200
142 | },
143 | {
144 | "id": "297802",
145 | "title": "Aquaman",
146 | "poster": "https://image.tmdb.org/t/p/w1280/5Kg76ldv7VxeX9YlcQXiowHgdX6.jpg",
147 | "overview": "Once home to the most advanced civilization on Earth, Atlantis is now an underwater kingdom ruled by the power-hungry King Orm. With a vast army at his disposal, Orm plans to conquer the remaining oceanic people and then the surface world. Standing in his way is Arthur Curry, Orm's half-human, half-Atlantean brother and true heir to the throne.",
148 | "release_date": 1544140800,
149 | "genre": "action"
150 | },
151 | {
152 | "id": "512196",
153 | "title": "Happy Death Day 2U",
154 | "poster": "https://image.tmdb.org/t/p/w1280/4tdnePOkOOzwuGPEOAHp8UA4vqx.jpg",
155 | "overview": "Collegian Tree Gelbman wakes up in horror to learn that she's stuck in a parallel universe. Her boyfriend Carter is now with someone else, and her friends and fellow students seem to be completely different versions of themselves. When Tree discovers that Carter's roommate has been altering time, she finds herself once again the target of a masked killer. When the psychopath starts to go after her inner circle, Tree soon realizes that she must die over and over again to save everyone.",
156 | "release_date": 1550016000
157 | },
158 | {
159 | "id": "390634",
160 | "title": "Fate/stay night: Heaven’s Feel II. lost butterfly",
161 | "poster": "https://image.tmdb.org/t/p/w1280/4tS0iyKQBDFqVpVcH21MSJwXZdq.jpg",
162 | "overview": "Theatrical-release adaptation of the visual novel 'Fate/stay night', following the third and final route. (Part 2 of a trilogy.)",
163 | "release_date": 1547251200
164 | },
165 | {
166 | "id": "500682",
167 | "title": "The Highwaymen",
168 | "poster": "https://image.tmdb.org/t/p/w1280/4bRYg4l12yDuJvAfqvUOPnBrxno.jpg",
169 | "overview": "In 1934, Frank Hamer and Manny Gault, two former Texas Rangers, are commissioned to put an end to the wave of vicious crimes perpetrated by Bonnie Parker and Clyde Barrow, a notorious duo of infamous robbers and cold-blooded killers who nevertheless are worshiped by the public.",
170 | "release_date": 1552608000
171 | },
172 | {
173 | "id": "454294",
174 | "title": "The Kid Who Would Be King",
175 | "poster": "https://image.tmdb.org/t/p/w1280/kBuvLX6zynQP0sjyqbXV4jNaZ4E.jpg",
176 | "overview": "Old-school magic meets the modern world when young Alex stumbles upon the mythical sword Excalibur. He soon unites his friends and enemies, and they become knights who join forces with the legendary wizard Merlin. Together, they must save mankind from the wicked enchantress Morgana and her army of supernatural warriors.",
177 | "release_date": 1547596800
178 | },
179 | {
180 | "id": "543103",
181 | "title": "Kamen Rider Heisei Generations FOREVER",
182 | "poster": "https://image.tmdb.org/t/p/w1280/6sOFQDlkY6El1B2P5gklzJfVdsT.jpg",
183 | "overview": "In the world of Sougo Tokiwa and Sento Kiryu, their 'companions' are losing their memories one after the other as they're replaced by other people. The Super Time Jacker, Tid , appears before them. He orders his powerful underlings, Another Double and Another Den-O, to pursue a young boy called Shingo. While fighting to protect Shingo, Sougo meets Ataru, a young man who loves Riders, but Ataru says that Kamen Riders aren't real. What is the meaning of those words? While the mystery deepens, the true enemy that Sougo and Sento must defeat appears in the Kuriogatake mountain...",
184 | "release_date": 1545436800
185 | },
186 | {
187 | "id": "404368",
188 | "title": "Ralph Breaks the Internet",
189 | "poster": "https://image.tmdb.org/t/p/w1280/lvfIaThG5HA8THf76nghKinjjji.jpg",
190 | "overview": "Video game bad guy Ralph and fellow misfit Vanellope von Schweetz must risk it all by traveling to the World Wide Web in search of a replacement part to save Vanellope's video game, 'Sugar Rush.' In way over their heads, Ralph and Vanellope rely on the citizens of the internet -- the netizens -- to help navigate their way, including an entrepreneur named Yesss, who is the head algorithm and the heart and soul of trend-making site BuzzzTube.",
191 | "release_date": 1542672000
192 | },
193 | {
194 | "id": "338952",
195 | "title": "Fantastic Beasts: The Crimes of Grindelwald",
196 | "poster": "https://image.tmdb.org/t/p/w1280/fMMrl8fD9gRCFJvsx0SuFwkEOop.jpg",
197 | "overview": "Gellert Grindelwald has escaped imprisonment and has begun gathering followers to his cause—elevating wizards above all non-magical beings. The only one capable of putting a stop to him is the wizard he once called his closest friend, Albus Dumbledore. However, Dumbledore will need to seek help from the wizard who had thwarted Grindelwald once before, his former student Newt Scamander, who agrees to help, unaware of the dangers that lie ahead. Lines are drawn as love and loyalty are tested, even among the truest friends and family, in an increasingly divided wizarding world.",
198 | "release_date": 1542153600,
199 | "genre": "fantasy"
200 | },
201 | {
202 | "id": "399579",
203 | "title": "Alita: Battle Angel",
204 | "poster": "https://image.tmdb.org/t/p/w1280/xRWht48C2V8XNfzvPehyClOvDni.jpg",
205 | "overview": "When Alita awakens with no memory of who she is in a future world she does not recognize, she is taken in by Ido, a compassionate doctor who realizes that somewhere in this abandoned cyborg shell is the heart and soul of a young woman with an extraordinary past.",
206 | "release_date": 1548892800
207 | },
208 | {
209 | "id": "450001",
210 | "title": "Master Z: Ip Man Legacy",
211 | "poster": "https://image.tmdb.org/t/p/w1280/nkCoAik5I4j3Gkd2upj9xv4F0QN.jpg",
212 | "overview": "After being defeated by Ip Man, Cheung Tin Chi is attempting to keep a low profile. While going about his business, he gets into a fight with a foreigner by the name of Davidson, who is a big boss behind the bar district. Tin Chi fights hard with Wing Chun and earns respect.",
213 | "release_date": 1545264000
214 | },
215 | {
216 | "id": "504172",
217 | "title": "The Mule",
218 | "poster": "https://image.tmdb.org/t/p/w1280/oeZh7yEz3PMnZLgBPhrafFHRbVz.jpg",
219 | "overview": "Earl Stone, a man in his 80s who is broke, alone, and facing foreclosure of his business when he is offered a job that simply requires him to drive. Easy enough, but, unbeknownst to Earl, he’s just signed on as a drug courier for a Mexican cartel. He does so well that his cargo increases exponentially, and Earl hit the radar of hard-charging DEA agent Colin Bates.",
220 | "release_date": 1544745600
221 | }
222 | ]
223 |
--------------------------------------------------------------------------------
/docker-compose.https.yml:
--------------------------------------------------------------------------------
1 | services:
2 | meilisearch:
3 | image: getmeili/meilisearch:latest
4 | ports:
5 | - "7700:7700"
6 | environment:
7 | - MEILI_MASTER_KEY=masterKey
8 | - MEILI_NO_ANALYTICS=true
9 | volumes:
10 | - ./meilisearch.key:/meilisearch.key
11 | - ./meilisearch.crt:/meilisearch.crt
12 | command:
13 | [
14 | "meilisearch",
15 | "--ssl-cert-path",
16 | "/meilisearch.crt",
17 | "--ssl-key-path",
18 | "/meilisearch.key",
19 | ]
20 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | meilisearch:
3 | image: getmeili/meilisearch:latest
4 | ports:
5 | - "7700:7700"
6 | environment:
7 | - MEILI_MASTER_KEY=masterKey
8 | - MEILI_NO_ANALYTICS=true
9 |
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | meilisearch-python-sdk.paulsanders.dev
2 |
--------------------------------------------------------------------------------
/docs/async_client_api.md:
--------------------------------------------------------------------------------
1 | ## `AsyncClient` Usage
2 |
3 | ### Create a client with a context manager
4 |
5 | Running the client in a context manager ensures that everything is cleaned up when you are
6 | done using the client. To create a client:
7 |
8 | ```py
9 | from meilisearch_python_sdk import AsyncClient
10 |
11 |
12 | async with AsyncClient("http://localhost:7700", "masterKey") as client:
13 | index = client.index("movies")
14 | ...
15 | ```
16 |
17 | ### Custom headers
18 |
19 | Custom headers can be added to the client by passing them to `custom_headers` when creating the
20 | client.
21 |
22 | ```py
23 | from meilisearch_python_sdk import AsyncClient
24 |
25 | async with AsyncClient(
26 | "http://127.0.0.1:7700",
27 | "masterKey",
28 | custom_headers={"header_key_1": "header_value_1", "header_key_2": "header_value_2"}
29 | ) as client:
30 | index = client.index("movies")
31 | ...
32 | ```
33 |
34 | ### Create a client without a context manager
35 |
36 | It is also possible to use the client without a context manager, but in doing so you will need
37 | to make sure to do the cleanup yourself:
38 |
39 | ```py
40 | from meilisearch_python_sdk import AsyncClient
41 |
42 |
43 | client = AsyncClient("http://localhost:7700", "masterKey")
44 | try:
45 |     ...
46 | finally:
47 |     await client.aclose()
48 |
49 | ```
50 |
51 | ## `AsyncClient` API
52 |
53 | ::: meilisearch_python_sdk.AsyncClient
54 |
--------------------------------------------------------------------------------
/docs/async_index_api.md:
--------------------------------------------------------------------------------
1 | ## `AsyncIndex` Usage
2 |
3 | The `AsyncIndex` is the same as the `Index`, but provides asynchronous methods to work with, and
4 | should be used with the `AsyncClient`. When you create a new index with the `AsyncClient`
5 | it will create an `AsyncIndex` instance (see the example below).
6 |
7 | ## `AsyncIndex` API
8 |
9 | ::: meilisearch_python_sdk.index.AsyncIndex
10 |
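11 | ### Example
12 |
13 | A minimal sketch (this assumes a Meilisearch instance running locally with the master key set to
14 | `masterKey` and an existing `movies` index):
15 |
16 | ```py
17 | import asyncio
18 |
19 | from meilisearch_python_sdk import AsyncClient
20 |
21 |
22 | async def main() -> None:
23 |     async with AsyncClient("http://localhost:7700", "masterKey") as client:
24 |         index = client.index("movies")  # returns an AsyncIndex
25 |         result = await index.search("Cars")
26 |         print(result)  # noqa: T201
27 |
28 |
29 | asyncio.run(main())
30 | ```
31 |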
--------------------------------------------------------------------------------
/docs/client_api.md:
--------------------------------------------------------------------------------
1 | ## `Client` Usage
2 |
3 | ### Create a client
4 |
5 | To create a client:
6 |
7 | ```py
8 | from meilisearch_python_sdk import Client
9 |
10 |
11 | client = Client("http://localhost:7700", "masterKey")
12 | index = client.index("movies")
13 | ...
14 | ```
15 |
16 | ### Custom headers
17 |
18 | Custom headers can be added to the client by passing them to `custom_headers` when creating the
19 | client.
20 |
21 | ```py
22 | from meilisearch_python_sdk import Client
23 |
24 | client = Client(
25 | "http://127.0.0.1:7700",
26 | "masterKey",
27 | custom_headers={"header_key_1": "header_value_1", "header_key_2": "header_value_2"}
28 | )
29 | index = client.index("movies")
30 | ...
31 | ```
32 |
33 | ## `Client` API
34 |
35 | ::: meilisearch_python_sdk.Client
36 |
--------------------------------------------------------------------------------
/docs/css/custom.css:
--------------------------------------------------------------------------------
1 | .md-source__repository {
2 | overflow: visible;
3 | }
4 |
5 | div.autodoc-docstring {
6 | padding-left: 20px;
7 | margin-bottom: 30px;
8 | border-left: 5px solid rgba(230, 230, 230);
9 | }
10 |
11 | div.autodoc-members {
12 | padding-left: 20px;
13 | margin-bottom: 15px;
14 | }
15 |
--------------------------------------------------------------------------------
/docs/decorators_api.md:
--------------------------------------------------------------------------------
1 | ## Decorator Usage
2 |
3 | Various decorators are provided that can be used to help with Meilisearch interactions.
4 |
5 | ::: meilisearch_python_sdk.decorators
6 |
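7 | ### Example
8 |
9 | A minimal sketch using the `add_documents` decorator (this assumes Meilisearch is running locally
10 | with the master key set to `masterKey` and that the `movies` index already exists; see
11 | `examples/add_documents_decorator.py` for a full example):
12 |
13 | ```py
14 | from __future__ import annotations
15 |
16 | from typing import Any
17 |
18 | from meilisearch_python_sdk.decorators import ConnectionInfo, add_documents
19 |
20 |
21 | @add_documents(
22 |     index_name="movies",
23 |     connection_info=ConnectionInfo(url="http://127.0.0.1:7700", api_key="masterKey"),
24 | )
25 | def load_documents() -> list[dict[str, Any]]:
26 |     # The documents returned by the decorated function are added to the movies index
27 |     return [{"id": 1, "title": "Test"}]
28 |
29 |
30 | load_documents()
31 | ```
32 |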
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Meilisearch Python SDK
2 |
3 | Meilisearch Python SDK provides both an async and sync client for the
4 | [Meilisearch](https://github.com/meilisearch/meilisearch) API.
5 |
6 | The focus of this documentation is on the Meilisearch Python SDK API. More information about
7 | Meilisearch itself and how to use it can be found [here](https://www.meilisearch.com/docs).
8 |
9 | ## Which client to choose
10 |
11 | If the code base you are working with uses asyncio, for example if you are using
12 | [FastAPI](https://fastapi.tiangolo.com/), choose the `AsyncClient`; otherwise choose the `Client`.
13 | The functionality of the two clients is the same, the difference being that the `AsyncClient`
14 | provides async methods and uses the `AsyncIndex`, which also provides async methods, while the
15 | `Client` provides blocking methods and uses the `Index`, which also provides blocking methods.
16 |
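17 | As a quick illustration (a minimal sketch assuming Meilisearch is running locally with the master
18 | key set to `masterKey` and an existing `movies` index):
19 |
20 | ```py
21 | import asyncio
22 |
23 | from meilisearch_python_sdk import AsyncClient, Client
24 |
25 |
26 | def sync_search() -> None:
27 |     # The blocking client for synchronous code bases
28 |     client = Client("http://localhost:7700", "masterKey")
29 |     print(client.index("movies").search("Cars"))  # noqa: T201
30 |
31 |
32 | async def async_search() -> None:
33 |     # The async client for asyncio code bases, e.g. FastAPI
34 |     async with AsyncClient("http://localhost:7700", "masterKey") as client:
35 |         print(await client.index("movies").search("Cars"))  # noqa: T201
36 |
37 |
38 | sync_search()
39 | asyncio.run(async_search())
40 | ```
41 |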
--------------------------------------------------------------------------------
/docs/index_api.md:
--------------------------------------------------------------------------------
1 | ## `Index` Usage
2 |
3 | The `Index` uses blocking methods and should be used with the `Client`. When you create
4 | a new index with the `Client` it will create an `Index` instance (see the example below).
5 |
6 | ## `Index` API
7 |
8 | ::: meilisearch_python_sdk.index.Index
9 |
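10 | ### Example
11 |
12 | A minimal sketch (this assumes a Meilisearch instance running locally with the master key set to
13 | `masterKey` and an existing `movies` index):
14 |
15 | ```py
16 | from meilisearch_python_sdk import Client
17 |
18 |
19 | client = Client("http://localhost:7700", "masterKey")
20 | index = client.index("movies")  # returns an Index
21 | result = index.search("Cars")
22 | print(result)  # noqa: T201
23 | ```
24 |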
--------------------------------------------------------------------------------
/docs/js/umami.js:
--------------------------------------------------------------------------------
1 | document.addEventListener("DOMContentLoaded", function () {
2 | var umamiScript = document.createElement("script");
3 | umamiScript.defer = true;
4 | umamiScript.src = "https://cloud.umami.is/script.js";
5 | umamiScript.dataset.websiteId = "29a9d193-92a3-44d0-9b1d-16ddd23898fb";
6 | document.head.appendChild(umamiScript);
7 | });
8 |
--------------------------------------------------------------------------------
/docs/json_handler.md:
--------------------------------------------------------------------------------
1 | # JSON Handler
2 |
3 | For JSON loads and dumps you have the option to use the `json` module from the standard library,
4 | orjson, or ujson. This is done by setting the `json_handler` when creating the `AsyncClient` or
5 | `Client`. By default the standard library `json` module will be used. The examples below use
6 | `Client`, and the same options are available for `AsyncClient`.
7 |
8 | ## Standard Library `json` Module
9 |
10 | ### Custom Serializer
11 |
12 | In some cases your documents will contain types that the Python JSON serializer does not know how
13 | to handle. When this happens you can provide your own custom serializer when using the `json`
14 | module.
15 |
16 | ### Example
17 |
18 | ```py
19 | from datetime import datetime
20 | from json import JSONEncoder
21 | from uuid import UUID, uuid4
22 |
23 | from meilisearch_python_sdk import Client
24 | from meilisearch_python_sdk.json_handler import BuiltinHandler
25 |
26 |
27 | class CustomEncoder(JSONEncoder):
28 | def default(self, o):
29 | if isinstance(o, (UUID, datetime)):
30 | return str(o)
31 |
32 | # Let the base class default method raise the TypeError
33 | return super().default(o)
34 |
35 |
36 | documents = [
37 | {"id": uuid4(), "title": "test 1", "when": datetime.now()},
38 | {"id": uuid4(), "title": "Test 2", "when": datetime.now()},
39 | ]
40 | client = Client("http://127.0.0.1:7700", json_handler=BuiltinHandler(serializer=CustomEncoder))
41 | index = client.index("movies", primary_key="id")
42 | index.add_documents(documents)
43 | ```
44 |
45 | ## orjson
46 |
47 | ### Example
48 |
49 | ```py
50 | from uuid import uuid4
51 |
52 | from meilisearch_python_sdk import Client
53 | from meilisearch_python_sdk.json_handler import OrjsonHandler
54 |
55 |
56 | documents = [
57 | {"id": uuid4(), "title": "test 1"},
58 | {"id": uuid4(), "title": "Test 2"},
59 | ]
60 | client = Client("http://127.0.0.1:7700", json_handler=OrjsonHandler())
61 | index = client.index("movies", primary_key="id")
62 | index.add_documents(documents)
63 | ```
64 |
65 | ## ujson
66 |
67 | ### Example
68 |
69 | ```py
70 | from uuid import uuid4
71 |
72 | from meilisearch_python_sdk import Client
73 | from meilisearch_python_sdk.json_handler import UjsonHandler
74 |
75 |
76 | documents = [
77 | {"id": uuid4(), "title": "test 1"},
78 | {"id": uuid4(), "title": "Test 2"},
79 | ]
80 | client = Client("http://127.0.0.1:7700", json_handler=UjsonHandler())
81 | index = client.index("movies", primary_key="id")
82 | index.add_documents(documents)
83 | ```
84 |
--------------------------------------------------------------------------------
/docs/plugins.md:
--------------------------------------------------------------------------------
1 | # Plugins
2 |
3 | Plugins can be used to extend the functionality of certain methods; currently plugins are only
4 | supported for indexes. To create a plugin you create a class that implements the Protocol for the
5 | plugin, then add an instance of your class to the plugins when creating an index. Plugins are
6 | passed through a named tuple that specifies where the plugin should run. The options are:
7 |
8 | - add_documents_plugins: Runs the plugins when adding documents. This runs for all the add documents
9 | methods, e.g. `add_documents_in_batches`.
10 | - delete_all_documents_plugins: Run on the `delete_all_documents` method.
11 | - delete_document_plugins: Run on the `delete_document` method.
12 | - delete_documents_plugins: Run on the `delete_documents` method.
13 | - delete_documents_by_filter_plugins: Run on the `delete_documents_by_filter` method.
14 | - search_plugins: Run on the `search` and `facet_search` methods.
15 | - update_documents_plugins: Run on the `update_documents` method.
16 |
17 | When creating your plugin you specify if you want it to run before or after the default
18 | functionality. Additionally, plugins for async indexes can be run concurrently with the default
19 | functionality.
20 |
21 | ## Examples:
22 |
23 | ### Search metrics
24 |
25 | It is common to want to know what users are searching for, however Meilisearch doesn't provide a
26 | way to track this out of the box. A search plugin could be used to implement this functionality
27 | yourself.
28 |
29 | Note that in these examples the protocol is satisfied by providing the `CONCURRENT_EVENT`,
30 | `POST_EVENT`, and `PRE_EVENT` variables and the
31 | `async def run_plugin(self, event: AsyncEvent, **kwargs: Any) -> None:` method for an async index,
32 | or the `POST_EVENT` and `PRE_EVENT` variables and the
33 | `def run_plugin(self, event: Event, **kwargs: Any) -> None:` method for a non-async index. Your
34 | class can contain any additional methods/variables needed as long as the protocol requirements
35 | have been satisfied.
36 |
37 | #### Async index
38 |
39 | ```py
40 | import asyncio
41 | import json
42 | import sqlite3
43 | from typing import Any
44 |
45 | from meilisearch_python_sdk import AsyncClient
46 | from meilisearch_python_sdk.plugins import AsyncEvent, AsyncIndexPlugins
47 |
48 |
49 | class SearchTrackerPlugin:
50 | CONCURRENT_EVENT = True # Specifies the plugin should be run concurrently with the search
51 | POST_EVENT = False
52 | PRE_EVENT = False
53 |
54 | def __init__(self) -> None:
55 | self.conn = sqlite3.Connection("examples/search_tracker.db")
56 | self.create_table()
57 |
58 | def create_table(self) -> None:
59 |         cursor = self.conn.cursor()
60 |         try:
61 |             cursor.execute("CREATE TABLE IF NOT EXISTS searches(query STRING)")
62 | finally:
63 | cursor.close()
64 |
65 | async def run_plugin(self, event: AsyncEvent, **kwargs: Any) -> None:
66 | """Note that this example uses sqlite which does not provide an async driver.
67 |
68 | Typically if you are using the AsyncClient you would also be using an async driver for the
69 | database. sqlite is used in this example for simplicity.
70 | """
71 | if kwargs.get("query"):
72 | self.save_search_query(kwargs["query"])
73 |
74 | def save_search_query(self, query: str) -> None:
75 |         cursor = self.conn.cursor()
76 |         try:
77 |             cursor.execute("INSERT INTO searches VALUES(?)", (query,))
78 |             self.conn.commit()
79 | finally:
80 | cursor.close()
81 |
82 |
83 | async def main() -> int:
84 | with open("datasets/small_movies.json") as f:
85 | documents = json.load(f)
86 |
87 | client = AsyncClient("http://127.0.0.1:7700", "masterKey")
88 | plugins = AsyncIndexPlugins(search_plugins=(SearchTrackerPlugin(),))
89 | index = await client.create_index("movies", primary_key="id", plugins=plugins)
90 | task = await index.add_documents(documents)
91 | await client.wait_for_task(task.task_uid)
92 | result = await index.search("Cars")
93 | print(result) # noqa: T201
94 |
95 | return 0
96 |
97 |
98 | if __name__ == "__main__":
99 | raise SystemExit(asyncio.run(main()))
100 | ```
101 |
102 | #### Index
103 |
104 | ```py
105 | import json
106 | import sqlite3
107 | from typing import Any
108 |
109 | from meilisearch_python_sdk import Client
110 | from meilisearch_python_sdk.plugins import Event, IndexPlugins
111 |
112 |
113 | class SearchTrackerPlugin:
114 | POST_EVENT = False
115 | PRE_EVENT = True # Specifies the plugin should be run before the search
116 |
117 | def __init__(self) -> None:
118 | self.conn = sqlite3.Connection("examples/search_tracker.db")
119 | self.create_table()
120 |
121 | def create_table(self) -> None:
122 |         cursor = self.conn.cursor()
123 |         try:
124 |             cursor.execute("CREATE TABLE IF NOT EXISTS searches(query STRING)")
125 | finally:
126 | cursor.close()
127 |
128 | def run_plugin(self, event: Event, **kwargs: Any) -> None:
129 | if kwargs.get("query"):
130 | self.save_search_query(kwargs["query"])
131 |
132 | def save_search_query(self, query: str) -> None:
133 |         cursor = self.conn.cursor()
134 |         try:
135 |             cursor.execute("INSERT INTO searches VALUES(?)", (query,))
136 |             self.conn.commit()
137 | finally:
138 | cursor.close()
139 |
140 |
141 | def main() -> int:
142 | with open("datasets/small_movies.json") as f:
143 | documents = json.load(f)
144 |
145 | client = Client("http://127.0.0.1:7700", "masterKey")
146 | plugins = IndexPlugins(search_plugins=(SearchTrackerPlugin(),))
147 | index = client.create_index("movies", primary_key="id", plugins=plugins)
148 | task = index.add_documents(documents)
149 | client.wait_for_task(task.task_uid)
150 | result = index.search("Cars")
151 | print(result) # noqa: T201
152 |
153 | return 0
154 |
155 |
156 | if __name__ == "__main__":
157 | raise SystemExit(main())
158 | ```
159 |
160 | ### Modify documents and search results
161 |
162 | A pre event plugin can be used to modify the documents before sending them for indexing. In this
163 | example a new `access` field will be added to the documents before they are added or updated. The
164 | example will set every other record to `admin` access with the remaining records set to `read`.
165 | This illustrates the idea of modifying documents even if it doesn't make real world sense.
166 |
167 | A post search plugin (this type of search plugin can only be run post search because it requires
168 | the result of the search) will be used to remove records marked as `admin` before returning the
169 | result. In the real world this filtering would probably be done with a filterable field in
170 | Meilisearch, but again, this is just used here to illustrate the idea.
171 |
172 | #### Async Index
173 |
174 | ```py
175 | import asyncio
176 | import json
177 | from typing import Any, Sequence
178 |
179 | from meilisearch_python_sdk import AsyncClient
180 | from meilisearch_python_sdk.models.search import SearchResults
181 | from meilisearch_python_sdk.plugins import AsyncEvent, AsyncIndexPlugins
182 | from meilisearch_python_sdk.types import JsonMapping
183 |
184 |
185 | class ModifyDocumentPlugin:
186 | CONCURRENT_EVENT = False
187 | POST_EVENT = False
188 | PRE_EVENT = True # Specifies the plugin should be run before adding documents
189 |
190 | async def run_document_plugin(
191 | self, event: AsyncEvent, *, documents: Sequence[JsonMapping], **kwargs: Any
192 | ) -> Sequence[JsonMapping]:
193 | updated = []
194 | for i, document in enumerate(documents):
195 | if i % 2 == 0:
196 | document["access"] = "admin"
197 | else:
198 | document["access"] = "read"
199 |
200 | updated.append(document)
201 |
202 | return updated
203 |
204 |
205 | class FilterSearchResultsPlugin:
206 | CONCURRENT_EVENT = False
207 | POST_EVENT = True # Specifies the plugin should be run after the search
208 | PRE_EVENT = False
209 |
210 | async def run_post_search_plugin(
211 | self, event: AsyncEvent, *, search_results: SearchResults, **kwargs: Any
212 | ) -> SearchResults:
213 | filtered_hits = []
214 | for hit in search_results.hits:
215 | if hit["access"] != "admin":
216 | filtered_hits.append(hit)
217 |
218 | search_results.hits = filtered_hits
219 |
220 | return search_results
221 |
222 |
223 | async def main() -> int:
224 | with open("datasets/small_movies.json") as f:
225 | documents = json.load(f)
226 |
227 | client = AsyncClient("http://127.0.0.1:7700", "masterKey")
228 | plugins = AsyncIndexPlugins(
229 | add_documents_plugins=(ModifyDocumentPlugin(),),
230 | update_documents_plugins=(ModifyDocumentPlugin(),),
231 | search_plugins=(FilterSearchResultsPlugin(),),
232 | )
233 | index = await client.create_index("movies", primary_key="id", plugins=plugins)
234 | task = await index.add_documents(documents)
235 | await client.wait_for_task(task.task_uid)
236 | result = await index.search("cars")
237 | print(result) # noqa: T201
238 |
239 | return 0
240 |
241 |
242 | if __name__ == "__main__":
243 | raise SystemExit(asyncio.run(main()))
244 | ```
245 |
246 | #### Index
247 |
248 | ```py
249 | import json
250 | from typing import Any, Sequence
251 |
252 | from meilisearch_python_sdk import Client
253 | from meilisearch_python_sdk.models.search import SearchResults
254 | from meilisearch_python_sdk.plugins import Event, IndexPlugins
255 | from meilisearch_python_sdk.types import JsonMapping
256 |
257 |
258 | class ModifyDocumentPlugin:
259 | POST_EVENT = False
260 | PRE_EVENT = True # Specifies the plugin should be run before adding documents
261 |
262 | def run_document_plugin(
263 | self, event: Event, *, documents: Sequence[JsonMapping], **kwargs: Any
264 | ) -> Sequence[JsonMapping]:
265 | updated = []
266 | for i, document in enumerate(documents):
267 | if i % 2 == 0:
268 | document["access"] = "admin"
269 | else:
270 | document["access"] = "read"
271 |
272 | updated.append(document)
273 |
274 | return updated
275 |
276 |
277 | class FilterSearchResultsPlugin:
278 | POST_EVENT = True # Specifies the plugin should be run after the search
279 | PRE_EVENT = False
280 |
281 | def run_post_search_plugin(
282 | self, event: Event, *, search_results: SearchResults, **kwargs: Any
283 | ) -> SearchResults:
284 | filtered_hits = []
285 | for hit in search_results.hits:
286 | if hit["access"] != "admin":
287 | filtered_hits.append(hit)
288 |
289 | search_results.hits = filtered_hits
290 |
291 | return search_results
292 |
293 |
294 | def main() -> int:
295 | with open("datasets/small_movies.json") as f:
296 | documents = json.load(f)
297 |
298 | client = Client("http://127.0.0.1:7700", "masterKey")
299 | plugins = IndexPlugins(
300 | add_documents_plugins=(ModifyDocumentPlugin(),),
301 | update_documents_plugins=(ModifyDocumentPlugin(),),
302 | search_plugins=(FilterSearchResultsPlugin(),),
303 | )
304 | index = client.create_index("movies", primary_key="id", plugins=plugins)
305 | task = index.add_documents(documents)
306 | client.wait_for_task(task.task_uid)
307 | result = index.search("cars")
308 | print(result) # noqa: T201
309 |
310 | return 0
311 |
312 |
313 | if __name__ == "__main__":
314 | raise SystemExit(main())
315 | ```
316 |
--------------------------------------------------------------------------------
/docs/pydantic.md:
--------------------------------------------------------------------------------
1 | # Pydantic usage
2 |
3 | This package uses [Pydantic](https://pydantic-docs.helpmanual.io/) to serialize/deserialize the JSON
4 | from Meilisearch into Python objects wherever possible, and in the process uses `CamelBase` from
5 | [camel-converter](https://github.com/sanders41/camel-converter) to convert the camelCaseNames from
6 | JSON into more Pythonic snake_case_names.
7 |
8 | In some instances it is not possible to return the data as an object because the structure will be
9 | dependent on your particular dataset and can't be known ahead of time. In these instances you can
10 | either work with the data in the dictionary that is returned, or because you will know the structure
11 | you can generate your own Pydantic models.
12 |
13 | As an example, if you want to get a movie from the
14 | [small movies example](https://github.com/sanders41/meilisearch-python-sdk/blob/main/datasets/small_movies.json)
15 | you could put the results into an object with the following:
16 |
17 | ```py
18 | from datetime import datetime
19 | from typing import Optional
20 |
21 | from camel_converter.pydantic_base import CamelBase
22 | from meilisearch_python_sdk import AsyncClient
23 |
24 |
25 | # Inheriting from CamelBase will allow your class to automatically convert
26 | # variables returned from the server in camelCase into snake_case. It will
27 | # also make it a Pydantic Model.
28 | class Movie(CamelBase):
29 | id: int
30 | title: str
31 | poster: str
32 | overview: str
33 | release_date: datetime
34 | genre: Optional[str] = None
35 |
36 |
37 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
38 | index = client.index("movies")
39 | movie_dict = await index.get_document(287947)
40 | movie = Movie(**movie_dict)
41 | ```
42 |
43 | The `movie` variable will then contain a `Movie` object with the following information:
44 |
45 | ```py
46 | Movie(
47 | id = 287947,
48 | title = "Shazam!",
49 | poster = "https://image.tmdb.org/t/p/w1280/xnopI5Xtky18MPhK40cZAGAOVeV.jpg",
50 | overview = "A boy is given the ability to become an adult superhero in times of need with a single magic word.",
51 | release_date = datetime.datetime(2019, 3, 23, 0, 0, tzinfo=datetime.timezone.utc),
52 | genre = "action",
53 | )
54 | ```
55 |
56 | By inheriting from CamelBase, or any of the other [provided models](https://github.com/sanders41/meilisearch-python-sdk/tree/main/meilisearch_python_sdk/models)
57 | you will be inheriting Pydantic models and therefore have access to the functionality Pydantic
58 | provides, such as [validators](https://pydantic-docs.helpmanual.io/usage/validators/) and [Fields](https://pydantic-docs.helpmanual.io/usage/model_config/#alias-precedence)
59 | (see the validator sketch at the end of this page). Pydantic will also automatically deserialize
60 | the data into the correct data type based on the type hint provided.
61 |
62 | For `SearchResults`, the `hits` field is generic so you can specify a model that matches your data.
63 | If no type is specified it will default to `JsonDict` (`dict[str, Any]`).
64 |
65 | ```py
66 | from datetime import datetime
67 | from typing import Optional
68 |
69 | from camel_converter.pydantic_base import CamelBase
70 | from meilisearch_python_sdk import AsyncClient
71 |
72 |
73 | # Inheriting from CamelBase will allow your class to automatically convert
74 | # variables returned from the server in camelCase into snake_case. It will
75 | # also make it a Pydantic Model.
76 | class Movie(CamelBase):
77 | id: int
78 | title: str
79 | poster: str
80 | overview: str
81 | release_date: datetime
82 | genre: Optional[str] = None
83 |
84 |
85 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
86 | index = client.index("movies", hits_type=Movie)
87 | movies = await index.search("Spiderman")
88 | ```
89 |
90 | `movies.hits` will now have items of type `Movie` instead of `JsonDict`.
91 |
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | *.db
2 | .env
3 | .venv
4 | env/
5 | venv/
6 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Meilisearch Python SDK Examples
2 |
3 | To run the examples, create and activate a virtual environment inside the `examples` directory
4 | (e.g. `python -m venv .venv && source .venv/bin/activate`), then install the requirements.
5 |
6 | ```sh
7 | pip install -r requirements.txt
8 | ```
9 |
10 | Start Meilisearch running locally with the master key set to `masterKey`. Then you can run
11 | the example files, e.g.:
12 |
13 | ```sh
14 | python add_documents_decorator.py
15 | ```
16 |
17 | ## FastAPI Example
18 |
19 | To run the FastAPI example run
20 |
21 | ```sh
22 | fastapi dev fastapi_example.py
23 | ```
24 |
25 | Then go to [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) to test the routes.
26 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/examples/__init__.py
--------------------------------------------------------------------------------
/examples/add_documents_decorator.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from typing import Any
5 |
6 | from meilisearch_python_sdk import Client
7 | from meilisearch_python_sdk.decorators import ConnectionInfo, add_documents
8 |
9 |
10 | @add_documents(
11 | index_name="movies",
12 | connection_info=ConnectionInfo(url="http://127.0.0.1:7700", api_key="masterKey"),
13 | )
14 | def load_documents() -> list[dict[str, Any]]:
15 | with open("../datasets/small_movies.json") as f:
16 | documents = json.load(f)
17 |
18 | return documents
19 |
20 |
21 | def main() -> int:
22 | client = Client("http://127.0.0.1:7700", "masterKey")
23 | index = client.create_index("movies", "id")
24 | load_documents()
25 | documents = index.get_documents()
26 |
27 | print(documents) # noqa: T201
28 |
29 | return 0
30 |
31 |
32 | if __name__ == "__main__":
33 | raise SystemExit(main())
34 |
--------------------------------------------------------------------------------
/examples/add_documents_in_batches.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 |
5 | from meilisearch_python_sdk import Client
6 |
7 |
8 | def main() -> int:
9 | with open("../datasets/small_movies.json") as f:
10 | documents = json.load(f)
11 |
12 | client = Client("http://127.0.0.1:7700", "masterKey")
13 | index = client.index("movies")
14 |
15 | # Meilisearch prefers larger batch sizes so set this as large as you can.
16 | index.add_documents_in_batches(documents, primary_key="id", batch_size=1000)
17 |
18 | return 0
19 |
20 |
21 | if __name__ == "__main__":
22 | raise SystemExit(main())
23 |
--------------------------------------------------------------------------------
/examples/async_add_documents_decorator.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | from pathlib import Path
6 | from typing import Any
7 |
8 | import aiofiles
9 |
10 | from meilisearch_python_sdk import AsyncClient
11 | from meilisearch_python_sdk.decorators import ConnectionInfo, async_add_documents
12 |
13 |
14 | @async_add_documents(
15 | index_name="movies",
16 | connection_info=ConnectionInfo(url="http://127.0.0.1:7700", api_key="masterKey"),
17 | )
18 | async def load_documents(
19 | file_path: Path | str = "../datasets/small_movies.json",
20 | ) -> list[dict[str, Any]]:
21 | async with aiofiles.open(file_path) as f:
22 | data = await f.read()
23 | documents = json.loads(data)
24 |
25 | return documents
26 |
27 |
28 | async def main() -> int:
29 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
30 | index = await client.create_index("movies", "id")
31 | await load_documents()
32 | documents = await index.get_documents()
33 |
34 | print(documents) # noqa: T201
35 |
36 | return 0
37 |
38 |
39 | if __name__ == "__main__":
40 | raise SystemExit(asyncio.run(main()))
41 |
--------------------------------------------------------------------------------
/examples/async_add_documents_in_batches.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | from pathlib import Path
6 |
7 | import aiofiles
8 |
9 | from meilisearch_python_sdk import AsyncClient, AsyncIndex
10 | from meilisearch_python_sdk.models.task import TaskInfo
11 |
12 |
13 | async def add_documents_in_batches(
14 | index: AsyncIndex, file_path: Path | str = "../datasets/small_movies.json"
15 | ) -> list[TaskInfo]:
16 | async with aiofiles.open(file_path) as f:
17 | data = await f.read()
18 | documents = json.loads(data)
19 |
20 | # Meilisearch prefers larger batch sizes so set this as large as you can.
21 | return await index.add_documents_in_batches(documents, primary_key="id", batch_size=1000)
22 |
23 |
24 | async def main() -> int:
25 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
26 | index = await client.create_index("movies", primary_key="id")
27 |
28 | await add_documents_in_batches(index)
29 |
30 | return 0
31 |
32 |
33 | if __name__ == "__main__":
34 | raise SystemExit(asyncio.run(main()))
35 |
--------------------------------------------------------------------------------
/examples/async_documents_and_search_results.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | from collections.abc import Sequence
6 | from typing import Any
7 |
8 | import aiofiles
9 |
10 | from meilisearch_python_sdk import AsyncClient
11 | from meilisearch_python_sdk.models.search import SearchResults
12 | from meilisearch_python_sdk.plugins import AsyncEvent, AsyncIndexPlugins
13 | from meilisearch_python_sdk.types import JsonMapping
14 |
15 |
16 | class ModifyDocumentPlugin:
17 | CONCURRENT_EVENT = False
18 | POST_EVENT = False
19 | PRE_EVENT = True # Specifies the plugin should be run before adding documents
20 |
21 | async def run_document_plugin(
22 | self, event: AsyncEvent, *, documents: Sequence[JsonMapping], **kwargs: Any
23 | ) -> Sequence[JsonMapping]:
24 | updated = []
25 | for i, document in enumerate(documents):
26 | if i % 2 == 0:
27 | document["access"] = "admin"
28 | else:
29 | document["access"] = "read"
30 |
31 | updated.append(document)
32 |
33 | return updated
34 |
35 |
36 | class FilterSearchResultsPlugin:
37 | CONCURRENT_EVENT = False
38 | POST_EVENT = True # Specifies the plugin should be run after the search
39 | PRE_EVENT = False
40 |
41 | async def run_post_search_plugin(
42 | self, event: AsyncEvent, *, search_results: SearchResults, **kwargs: Any
43 | ) -> SearchResults:
44 | filtered_hits = []
45 | for hit in search_results.hits:
46 | if hit["access"] != "admin":
47 | filtered_hits.append(hit)
48 |
49 | search_results.hits = filtered_hits
50 |
51 | return search_results
52 |
53 |
54 | async def main() -> int:
55 | async with aiofiles.open("../datasets/small_movies.json") as f:
56 | data = await f.read()
57 | documents = json.loads(data)
58 |
59 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
60 | plugins = AsyncIndexPlugins(
61 | add_documents_plugins=(ModifyDocumentPlugin(),),
62 | update_documents_plugins=(ModifyDocumentPlugin(),),
63 | search_plugins=(FilterSearchResultsPlugin(),),
64 | )
65 | index = await client.create_index("movies", primary_key="id", plugins=plugins)
66 | task = await index.add_documents(documents)
67 | await client.wait_for_task(task.task_uid)
68 | result = await index.search("cars")
69 | print(result) # noqa: T201
70 |
71 | return 0
72 |
73 |
74 | if __name__ == "__main__":
75 | raise SystemExit(asyncio.run(main()))
76 |
--------------------------------------------------------------------------------
/examples/async_search_tracker.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | import sqlite3
6 | from pathlib import Path
7 | from typing import Any
8 |
9 | import aiofiles
10 |
11 | from meilisearch_python_sdk import AsyncClient, AsyncIndex
12 | from meilisearch_python_sdk.models.search import SearchResults
13 | from meilisearch_python_sdk.models.task import TaskInfo
14 | from meilisearch_python_sdk.plugins import AsyncEvent, AsyncIndexPlugins
15 | from meilisearch_python_sdk.types import JsonDict
16 |
17 |
18 | class SearchTrackerPlugin:
19 | CONCURRENT_EVENT = True # Specifies the plugin should be run concurrently with the search
20 | POST_EVENT = False
21 | PRE_EVENT = False
22 |
23 | def __init__(self, db_path: Path | str = "search_tracker.db") -> None:
24 | self.conn = sqlite3.Connection(db_path)
25 | self.create_table()
26 |
27 | def create_table(self) -> None:
28 |         cursor = self.conn.cursor()
29 |         try:
30 |             cursor.execute("CREATE TABLE IF NOT EXISTS searches(query STRING)")
31 | finally:
32 | cursor.close()
33 |
34 | async def run_plugin(self, event: AsyncEvent, **kwargs: Any) -> None:
35 | """Note that this example uses sqlite which does not provide an async driver.
36 |
37 | Typically if you are using the AsyncClient you would also be using an async driver for the
38 | database. sqlite is used in this example for simplicity.
39 | """
40 | if kwargs.get("query"):
41 | self.save_search_query(kwargs["query"])
42 |
43 | def save_search_query(self, query: str) -> None:
44 |         cursor = self.conn.cursor()
45 |         try:
46 |             cursor.execute("INSERT INTO searches VALUES(?)", (query,))
47 |             self.conn.commit()
48 | finally:
49 | cursor.close()
50 |
51 |
52 | async def add_documents(
53 | index: AsyncIndex, file_path: Path | str = "../datasets/small_movies.json"
54 | ) -> TaskInfo:
55 | async with aiofiles.open(file_path) as f:
56 | data = await f.read()
57 | documents = json.loads(data)
58 | return await index.add_documents(documents)
59 |
60 |
61 | async def search(index: AsyncIndex, query: str) -> SearchResults[JsonDict]:
62 | return await index.search(query)
63 |
64 |
65 | async def main() -> int:
66 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
67 | plugins = AsyncIndexPlugins(search_plugins=(SearchTrackerPlugin(),))
68 | index = await client.create_index("movies", primary_key="id", plugins=plugins)
69 | task = await add_documents(index)
70 | await client.wait_for_task(task.task_uid)
71 | result = await index.search("Cars")
72 | print(result) # noqa: T201
73 |
74 | return 0
75 |
76 |
77 | if __name__ == "__main__":
78 | raise SystemExit(asyncio.run(main()))
79 |
--------------------------------------------------------------------------------
/examples/async_update_settings.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import json
5 | from pathlib import Path
6 |
7 | import aiofiles
8 |
9 | from meilisearch_python_sdk import AsyncClient, AsyncIndex
10 | from meilisearch_python_sdk.models.settings import MeilisearchSettings
11 | from meilisearch_python_sdk.models.task import TaskInfo
12 |
13 |
14 | async def add_documents(
15 | index: AsyncIndex, file_path: Path | str = "../datasets/small_movies.json"
16 | ) -> TaskInfo:
17 | async with aiofiles.open(file_path) as f:
18 | data = await f.read()
19 | documents = json.loads(data)
20 |
21 | return await index.add_documents(documents)
22 |
23 |
24 | async def update_settings(index: AsyncIndex) -> TaskInfo:
25 | settings = MeilisearchSettings(
26 | filterable_attributes=["genre"], searchable_attributes=["title", "genre", "overview"]
27 | )
28 |
29 | return await index.update_settings(settings)
30 |
31 |
32 | async def main() -> int:
33 | async with AsyncClient("http://127.0.0.1:7700", "masterKey") as client:
34 | index = await client.create_index("movies", primary_key="id")
35 | task = await update_settings(index)
36 | await client.wait_for_task(task.task_uid)
37 | await add_documents(index)
38 |
39 | return 0
40 |
41 |
42 | if __name__ == "__main__":
43 | raise SystemExit(asyncio.run(main()))
44 |
--------------------------------------------------------------------------------
/examples/documents_and_search_results.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from collections.abc import Sequence
5 | from typing import Any
6 |
7 | from meilisearch_python_sdk import Client
8 | from meilisearch_python_sdk.models.search import SearchResults
9 | from meilisearch_python_sdk.plugins import Event, IndexPlugins
10 | from meilisearch_python_sdk.types import JsonMapping
11 |
12 |
13 | class ModifyDocumentPlugin:
14 | POST_EVENT = False
15 | PRE_EVENT = True # Specifies the plugin should be run before adding documents
16 |
17 | def run_document_plugin(
18 | self, event: Event, *, documents: Sequence[JsonMapping], **kwargs: Any
19 | ) -> Sequence[JsonMapping]:
20 | updated = []
21 | for i, document in enumerate(documents):
22 | if i % 2 == 0:
23 | document["access"] = "admin"
24 | else:
25 | document["access"] = "read"
26 |
27 | updated.append(document)
28 |
29 | return updated
30 |
31 |
32 | class FilterSearchResultsPlugin:
33 | POST_EVENT = True # Specifies the plugin should be run after the search
34 | PRE_EVENT = False
35 |
36 | def run_post_search_plugin(
37 | self, event: Event, *, search_results: SearchResults, **kwargs: Any
38 | ) -> SearchResults:
39 | filtered_hits = []
40 | for hit in search_results.hits:
41 | if hit["access"] != "admin":
42 | filtered_hits.append(hit)
43 |
44 | search_results.hits = filtered_hits
45 |
46 | return search_results
47 |
48 |
49 | def main() -> int:
50 | with open("../datasets/small_movies.json") as f:
51 | documents = json.load(f)
52 |
53 | client = Client("http://127.0.0.1:7700", "masterKey")
54 | plugins = IndexPlugins(
55 | add_documents_plugins=(ModifyDocumentPlugin(),),
56 | update_documents_plugins=(ModifyDocumentPlugin(),),
57 | search_plugins=(FilterSearchResultsPlugin(),),
58 | )
59 | index = client.create_index("movies", primary_key="id", plugins=plugins)
60 | task = index.add_documents(documents)
61 | client.wait_for_task(task.task_uid)
62 | result = index.search("cars")
63 | print(result) # noqa: T201
64 |
65 | return 0
66 |
67 |
68 | if __name__ == "__main__":
69 | raise SystemExit(main())
70 |
--------------------------------------------------------------------------------
/examples/fastapi_example.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import AsyncGenerator
4 | from contextlib import asynccontextmanager
5 | from typing import Annotated, Any
6 |
7 | from fastapi import Depends, FastAPI, HTTPException
8 | from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR
9 |
10 | from meilisearch_python_sdk import AsyncClient, AsyncIndex
11 | from meilisearch_python_sdk.errors import MeilisearchApiError, MeilisearchCommunicationError
12 | from meilisearch_python_sdk.models.documents import DocumentsInfo
13 | from meilisearch_python_sdk.models.health import Health
14 | from meilisearch_python_sdk.models.search import SearchParams, SearchResults
15 | from meilisearch_python_sdk.models.task import TaskInfo
16 |
17 |
18 | @asynccontextmanager
19 | async def lifespan(app: FastAPI) -> AsyncGenerator:
20 | try:
21 | health = await client.health()
22 | if health.status != "available":
23 | raise HTTPException(
24 | status_code=HTTP_500_INTERNAL_SERVER_ERROR,
25 | detail="The Meilisearch server is not available",
26 | )
27 | except MeilisearchCommunicationError as e:
28 | raise HTTPException(
29 | status_code=HTTP_500_INTERNAL_SERVER_ERROR,
30 | detail="Unable to connect to the Meilisearch server",
31 | ) from e
32 | yield
33 | await client.aclose() # Shutdown the client when exiting the app
34 |
35 |
36 | app = FastAPI(lifespan=lifespan)
37 | client = AsyncClient("http://127.0.0.1:7700", "masterKey")
38 |
39 |
40 | async def get_index() -> AsyncIndex:
41 | try:
42 | index = await client.get_index("movies")
43 | except MeilisearchApiError as e:
44 | if e.status_code == 404: # If the index movies does not already exist create it
45 | index = await client.create_index("movies", primary_key="id")
46 | else:
47 | raise
48 | return index
49 |
50 |
51 | @app.get("/health")
52 | async def check_health() -> Health:
53 | return await client.health()
54 |
55 |
56 | @app.get("/documents")
57 | async def get_documents(index: Annotated[AsyncIndex, Depends(get_index)]) -> DocumentsInfo:
58 | return await index.get_documents()
59 |
60 |
61 | @app.post("/documents")
62 | async def add_documents(
63 | documents: list[dict[str, Any]], index: Annotated[AsyncIndex, Depends(get_index)]
64 | ) -> list[TaskInfo]:
65 | return await index.add_documents_in_batches(documents)
66 |
67 |
68 | @app.post("/search")
69 | async def search(
70 | search_params: SearchParams, index: Annotated[AsyncIndex, Depends(get_index)]
71 | ) -> SearchResults:
72 | params = search_params.model_dump()
73 | del params["index_uid"]
74 | return await index.search(**params)
75 |
--------------------------------------------------------------------------------
/examples/orjson_example.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 |
5 | from meilisearch_python_sdk import Client
6 | from meilisearch_python_sdk.json_handler import OrjsonHandler
7 | from meilisearch_python_sdk.models.task import TaskInfo
8 |
9 |
10 | def add_documents(file_path: Path | str = "../datasets/small_movies.json") -> TaskInfo:
11 | client = Client("http://127.0.0.1:7700", "masterKey", json_handler=OrjsonHandler())
12 | index = client.create_index("movies", primary_key="id")
13 | return index.add_documents_from_file(file_path)
14 |
15 |
16 | def main() -> int:
17 | add_documents()
18 |
19 | return 0
20 |
21 |
22 | if __name__ == "__main__":
23 | raise SystemExit(main())
24 |
--------------------------------------------------------------------------------
/examples/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.pytest.ini_options]
2 | minversion = "6.0"
3 | asyncio_mode = "auto"
4 | asyncio_default_fixture_loop_scope = "session"
5 |
--------------------------------------------------------------------------------
/examples/requirements.txt:
--------------------------------------------------------------------------------
1 | ../.[all]
2 | fastapi==0.115.6
3 | pytest==8.3.4
4 | pytest-asyncio==0.25.0
5 |
--------------------------------------------------------------------------------
/examples/search_tracker.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import sqlite3
5 | from pathlib import Path
6 | from typing import Any
7 |
8 | from meilisearch_python_sdk import Client, Index
9 | from meilisearch_python_sdk.models.search import SearchResults
10 | from meilisearch_python_sdk.models.task import TaskInfo
11 | from meilisearch_python_sdk.plugins import Event, IndexPlugins
12 | from meilisearch_python_sdk.types import JsonDict
13 |
14 |
15 | class SearchTrackerPlugin:
16 | POST_EVENT = False
17 | PRE_EVENT = True # Specifies the plugin should be run before the search
18 |
19 | def __init__(self, db_path: Path | str = "search_tracker.db") -> None:
20 | self.conn = sqlite3.Connection(db_path)
21 | self.create_table()
22 |
23 | def create_table(self) -> None:
24 |         cursor = self.conn.cursor()
25 |         try:
26 |             cursor.execute("CREATE TABLE IF NOT EXISTS searches(query STRING)")
27 | finally:
28 | cursor.close()
29 |
30 | def run_plugin(self, event: Event, **kwargs: Any) -> None:
31 | if kwargs.get("query"):
32 | self.save_search_query(kwargs["query"])
33 |
34 | def save_search_query(self, query: str) -> None:
35 |         cursor = self.conn.cursor()
36 |         try:
37 |             cursor.execute("INSERT INTO searches VALUES(?)", (query,))
38 |             self.conn.commit()
39 | finally:
40 | cursor.close()
41 |
42 |
43 | def add_documents(
44 | index: Index, file_path: Path | str = "../datasets/small_movies.json"
45 | ) -> TaskInfo:
46 | with open(file_path) as f:
47 | documents = json.load(f)
48 | return index.add_documents(documents)
49 |
50 |
51 | def search(index: Index, query: str) -> SearchResults[JsonDict]:
52 | return index.search(query)
53 |
54 |
55 | def main() -> int:
56 | client = Client("http://127.0.0.1:7700", "masterKey")
57 | plugins = IndexPlugins(search_plugins=(SearchTrackerPlugin(),))
58 | index = client.create_index("movies", primary_key="id", plugins=plugins)
59 | task = add_documents(index)
60 | client.wait_for_task(task.task_uid)
61 | result = search(index, "Cars")
62 | print(result) # noqa: T201
63 |
64 | return 0
65 |
66 |
67 | if __name__ == "__main__":
68 | raise SystemExit(main())
69 |
--------------------------------------------------------------------------------
/examples/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/examples/tests/__init__.py
--------------------------------------------------------------------------------
/examples/tests/conftest.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from uuid import uuid4
3 |
4 | import pytest
5 | from pytest_asyncio import is_async_test
6 |
7 | from meilisearch_python_sdk import AsyncClient, Client
8 |
9 | MASTER_KEY = "masterKey"
10 |
11 | ROOT_PATH = Path().absolute()
12 | SMALL_MOVIES_PATH = ROOT_PATH.parent / "datasets" / "small_movies.json"
13 |
14 |
15 | def pytest_collection_modifyitems(items):
16 | pytest_asyncio_tests = (item for item in items if is_async_test(item))
17 | session_scope_marker = pytest.mark.asyncio(loop_scope="session")
18 | for async_test in pytest_asyncio_tests:
19 | async_test.add_marker(session_scope_marker, append=False)
20 |
21 |
22 | @pytest.fixture(scope="session")
23 | def base_url():
24 | return "http://127.0.0.1:7700"
25 |
26 |
27 | @pytest.fixture(scope="session")
28 | async def async_client(base_url):
29 | async with AsyncClient(base_url, MASTER_KEY) as client:
30 | yield client
31 |
32 |
33 | @pytest.fixture(scope="session")
34 | def client(base_url):
35 | yield Client(base_url, MASTER_KEY)
36 |
37 |
38 | @pytest.fixture(scope="session")
39 | def small_movies_path():
40 | return SMALL_MOVIES_PATH
41 |
42 |
43 | @pytest.fixture
44 | async def async_empty_index(async_client):
45 | async def index_maker():
46 | return await async_client.create_index(uid=str(uuid4()), timeout_in_ms=5000)
47 |
48 | return index_maker
49 |
50 |
51 | @pytest.fixture
52 | def empty_index(client):
53 | def index_maker():
54 | return client.create_index(uid=str(uuid4()), timeout_in_ms=5000)
55 |
56 | return index_maker
57 |
--------------------------------------------------------------------------------
/examples/tests/test_async_examples.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from uuid import uuid4
3 |
4 | from examples.async_add_documents_decorator import load_documents
5 | from examples.async_add_documents_in_batches import add_documents_in_batches
6 | from examples.async_search_tracker import SearchTrackerPlugin, search
7 | from examples.async_search_tracker import add_documents as search_tracker_add_documents
8 | from examples.async_update_settings import add_documents as update_settings_add_documents
9 | from examples.async_update_settings import update_settings
10 | from meilisearch_python_sdk.plugins import AsyncIndexPlugins
11 |
12 |
13 | async def test_add_documents_decorator(small_movies_path, async_client):
14 | index = await async_client.create_index("movies", "id")
15 | await load_documents(small_movies_path)
16 | result = await async_client.get_tasks()
17 | await asyncio.gather(*[async_client.wait_for_task(x.uid) for x in result.results])
18 | documents = await index.get_documents()
19 |
20 | assert len(documents.results) > 0
21 |
22 |
23 | async def test_add_documents_in_batches(small_movies_path, async_empty_index, async_client):
24 | index = await async_empty_index()
25 | tasks = await add_documents_in_batches(index, small_movies_path)
26 | for task in tasks:
27 | await async_client.wait_for_task(task.task_uid)
28 | result = await async_client.get_task(task.task_uid)
29 | assert result.status == "succeeded"
30 |
31 |
32 | async def test_search_tracker(small_movies_path, async_client, tmp_path):
33 | db_path = tmp_path / "search_tracker.db"
34 | plugins = AsyncIndexPlugins(search_plugins=(SearchTrackerPlugin(db_path),))
35 | index = await async_client.create_index(
36 | uid=str(uuid4()), primary_key="id", plugins=plugins, timeout_in_ms=5000
37 | )
38 | task = await search_tracker_add_documents(index, small_movies_path)
39 | await async_client.wait_for_task(task.task_uid)
40 | result = await async_client.get_task(task.task_uid)
41 | assert result.status == "succeeded"
42 | result = await search(index, "Cars")
43 | assert len(result.hits) > 0
44 |
45 |
46 | async def test_update_settings(small_movies_path, async_empty_index, async_client):
47 | index = await async_empty_index()
48 | task = await update_settings(index)
49 | await async_client.wait_for_task(task.task_uid)
50 | task = await update_settings_add_documents(index, small_movies_path)
51 | await async_client.wait_for_task(task.task_uid)
52 | result = await async_client.get_task(task.task_uid)
53 | assert result.status == "succeeded"
54 |
--------------------------------------------------------------------------------
/examples/tests/test_examples.py:
--------------------------------------------------------------------------------
1 | from uuid import uuid4
2 |
3 | from examples.orjson_example import add_documents as orjson_add_documents
4 | from examples.search_tracker import SearchTrackerPlugin, search
5 | from examples.search_tracker import add_documents as search_tracker_add_documents
6 | from examples.ujson_example import add_documents as ujson_add_documents
7 | from examples.update_settings import add_documents as update_settings_add_documents
8 | from examples.update_settings import update_settings
9 | from meilisearch_python_sdk.plugins import IndexPlugins
10 |
11 |
12 | def test_orjson_example(small_movies_path, client):
13 | task = orjson_add_documents(small_movies_path)
14 | client.wait_for_task(task.task_uid)
15 | result = client.get_task(task.task_uid)
16 | assert result.status == "succeeded"
17 |
18 |
19 | def test_search_tracker(small_movies_path, client, tmp_path):
20 | db_path = tmp_path / "search_tracker.db"
21 | plugins = IndexPlugins(search_plugins=(SearchTrackerPlugin(db_path),))
22 | index = client.create_index(
23 | uid=str(uuid4()), primary_key="id", plugins=plugins, timeout_in_ms=5000
24 | )
25 | task = search_tracker_add_documents(index, small_movies_path)
26 | client.wait_for_task(task.task_uid)
27 | result = client.get_task(task.task_uid)
28 | assert result.status == "succeeded"
29 | result = search(index, "Cars")
30 | assert len(result.hits) > 0
31 |
32 |
33 | def test_update_settings(small_movies_path, empty_index, client):
34 | index = empty_index()
35 | task = update_settings(index)
36 | client.wait_for_task(task.task_uid)
37 | task = update_settings_add_documents(index, small_movies_path)
38 | client.wait_for_task(task.task_uid)
39 | result = client.get_task(task.task_uid)
40 | assert result.status == "succeeded"
41 |
42 |
43 | def test_ujson_example(small_movies_path, client):
44 | task = ujson_add_documents(small_movies_path)
45 | client.wait_for_task(task.task_uid)
46 | result = client.get_task(task.task_uid)
47 | assert result.status == "succeeded"
48 |
--------------------------------------------------------------------------------
/examples/ujson_example.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 |
5 | from meilisearch_python_sdk import Client
6 | from meilisearch_python_sdk.json_handler import UjsonHandler
7 | from meilisearch_python_sdk.models.task import TaskInfo
8 |
9 |
10 | def add_documents(file_path: Path | str = "../datasets/small_movies.json") -> TaskInfo:
11 | client = Client("http://127.0.0.1:7700", "masterKey", json_handler=UjsonHandler())
12 | index = client.create_index("movies", primary_key="id")
13 | return index.add_documents_from_file(file_path)
14 |
15 |
16 | def main() -> int:
17 | add_documents()
18 |
19 | return 0
20 |
21 |
22 | if __name__ == "__main__":
23 | raise SystemExit(main())
24 |
--------------------------------------------------------------------------------
/examples/update_settings.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from pathlib import Path
5 |
6 | from meilisearch_python_sdk import Client, Index
7 | from meilisearch_python_sdk.models.settings import MeilisearchSettings
8 | from meilisearch_python_sdk.models.task import TaskInfo
9 |
10 |
11 | def add_documents(
12 | index: Index, file_path: Path | str = "../datasets/small_movies.json"
13 | ) -> TaskInfo:
14 | with open(file_path) as f:
15 | documents = json.load(f)
16 | return index.add_documents(documents)
17 |
18 |
19 | def update_settings(index: Index) -> TaskInfo:
20 | settings = MeilisearchSettings(
21 | filterable_attributes=["genre"], searchable_attributes=["title", "genre", "overview"]
22 | )
23 |
24 | return index.update_settings(settings)
25 |
26 |
27 | def main() -> int:
28 | client = Client("http://127.0.0.1:7700", "masterKey")
29 | index = client.create_index("movies", primary_key="id")
30 | task = update_settings(index)
31 | client.wait_for_task(task.task_uid)
32 | add_documents(index)
33 |
34 | return 0
35 |
36 |
37 | if __name__ == "__main__":
38 | raise SystemExit(main())
39 |
--------------------------------------------------------------------------------
/justfile:
--------------------------------------------------------------------------------
1 | @_default:
2 | just --list
3 |
4 | @lint:
5 | echo mypy
6 | just --justfile {{justfile()}} mypy
7 | echo ruff
8 | just --justfile {{justfile()}} ruff
9 | echo ruff-format
10 | just --justfile {{justfile()}} ruff-format
11 |
12 | @mypy:
13 | uv run mypy meilisearch_python_sdk tests
14 |
15 | @ruff:
16 | uv run ruff check .
17 |
18 | @ruff-format:
19 | uv run ruff format meilisearch_python_sdk tests examples
20 |
21 | @test *args="":
22 | -uv run pytest {{args}}
23 |
24 | @test-http2 *args="":
25 | -uv run pytest --http2 {{args}}
26 |
27 | @test-parallel *args="":
28 | -uv run pytest -n auto -m "not no_parallel" {{args}}
29 |
30 | @test-no-parallel *args="":
31 | -uv run pytest -m "no_parallel" {{args}}
32 |
33 | @test-parallel-http2 *args="":
34 | -uv run pytest -n auto -m "not no_parallel" --http2 {{args}}
35 |
36 | @test-no-parallel-http2 *args="":
37 | -uv run pytest -m "no_parallel" --http2 {{args}}
38 |
39 | @test-ci: start-meilisearch-detached && stop-meilisearch
40 | uv run pytest --cov=meilisearch_python_sdk --cov-report=xml
41 |
42 | @test-parallel-ci: start-meilisearch-detached && stop-meilisearch
43 | uv run pytest --cov=meilisearch_python_sdk --cov-report=xml -n auto -m "not no_parallel"
44 |
45 | @test-no-parallel-ci: start-meilisearch-detached && stop-meilisearch
46 | uv run pytest --cov=meilisearch_python_sdk --cov-report=xml -m "no_parallel"
47 |
48 | @test-parallel-ci-http2: start-meilisearch-detached-http2 && stop-meilisearch-http2
49 | uv run pytest --cov=meilisearch_python_sdk --cov-report=xml -n auto -m "not no_parallel" --http2
50 |
51 | @test-no-parallel-ci-http2: start-meilisearch-detached-http2 && stop-meilisearch-http2
52 | uv run pytest --cov=meilisearch_python_sdk --cov-report=xml -m "no_parallel" --http2
53 |
54 | @test-examples-ci: start-meilisearch-detached
55 | cd examples && \
56 | pip install -r requirements.txt && \
57 | pytest
58 |
59 | @start-meilisearch:
60 | docker compose up
61 |
62 | @start-meilisearch-detached:
63 | docker compose up -d
64 |
65 | @stop-meilisearch:
66 | docker compose down
67 |
68 | @start-meilisearch-http2:
69 | docker compose -f docker-compose.https.yml up
70 |
71 | @start-meilisearch-detached-http2:
72 | docker compose -f docker-compose.https.yml up -d
73 |
74 | @stop-meilisearch-http2:
75 | docker compose -f docker-compose.https.yml down
76 |
77 | @build-docs:
78 | uv run mkdocs build --strict
79 |
80 | @serve-docs:
81 | mkdocs serve
82 |
83 | @install:
84 | uv sync --frozen --all-extras
85 |
86 | @lock:
87 | uv lock
88 |
89 | @lock-upgrade:
90 | uv lock --upgrade
91 |
92 | @benchmark: start-meilisearch-detached && stop-meilisearch
93 | -uv run benchmark/run_benchmark.py
94 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/__init__.py:
--------------------------------------------------------------------------------
1 | from meilisearch_python_sdk._client import AsyncClient, Client
2 | from meilisearch_python_sdk._version import VERSION
3 | from meilisearch_python_sdk.index import AsyncIndex, Index
4 |
5 | __version__ = VERSION
6 |
7 |
8 | __all__ = ["AsyncClient", "AsyncIndex", "Client", "Index"]
9 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/_batch.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from datetime import datetime
4 | from typing import TYPE_CHECKING
5 |
6 | from meilisearch_python_sdk._utils import get_async_client, get_client
7 | from meilisearch_python_sdk.errors import BatchNotFoundError
8 | from meilisearch_python_sdk.models.batch import BatchResult, BatchStatus
9 |
10 | if TYPE_CHECKING:
11 | from httpx import AsyncClient as HttpxAsyncClient # pragma: no cover
12 | from httpx import Client as HttpxClient # pragma: no cover
13 |
14 | from meilisearch_python_sdk._client import ( # pragma: no cover
15 | AsyncClient,
16 | Client,
17 | )
18 |
19 |
20 | async def async_get_batch(
21 | client: HttpxAsyncClient | AsyncClient, batch_uid: int
22 | ) -> BatchResult:
23 | client_ = get_async_client(client)
24 | response = await client_.get(f"batches/{batch_uid}")
25 |
26 | if response.status_code == 404:
27 | raise BatchNotFoundError(f"Batch {batch_uid} not found")
28 |
29 | return BatchResult(**response.json())
30 |
31 |
32 | async def async_get_batches(
33 | client: HttpxAsyncClient | AsyncClient,
34 | *,
35 | uids: list[int] | None = None,
36 | batch_uids: list[int] | None = None,
37 | index_uids: list[int] | None = None,
38 | statuses: list[str] | None = None,
39 | types: list[str] | None = None,
40 | limit: int = 20,
41 | from_: str | None = None,
42 | reverse: bool = False,
43 | before_enqueued_at: datetime | None = None,
44 | after_enqueued_at: datetime | None = None,
45 | before_started_at: datetime | None = None,
46 | after_finished_at: datetime | None = None,
47 | ) -> BatchStatus:
48 | client_ = get_async_client(client)
49 | params = _build_parameters(
50 | uids=uids,
51 | batch_uids=batch_uids,
52 | index_uids=index_uids,
53 | statuses=statuses,
54 | types=types,
55 | limit=limit,
56 | from_=from_,
57 | reverse=reverse,
58 | before_enqueued_at=before_enqueued_at,
59 | after_enqueued_at=after_enqueued_at,
60 | before_started_at=before_started_at,
61 | after_finished_at=after_finished_at,
62 | )
63 | response = await client_.get("batches", params=params)
64 |
65 | return BatchStatus(**response.json())
66 |
67 |
68 | def get_batch(client: HttpxClient | Client, batch_uid: int) -> BatchResult:
69 | client_ = get_client(client)
70 | response = client_.get(f"batches/{batch_uid}")
71 |
72 | if response.status_code == 404:
73 | raise BatchNotFoundError(f"Batch {batch_uid} not found")
74 |
75 | return BatchResult(**response.json())
76 |
77 |
78 | def get_batches(
79 | client: HttpxClient | Client,
80 | *,
81 | uids: list[int] | None = None,
82 | batch_uids: list[int] | None = None,
83 | index_uids: list[int] | None = None,
84 | statuses: list[str] | None = None,
85 | types: list[str] | None = None,
86 | limit: int = 20,
87 | from_: str | None = None,
88 | reverse: bool = False,
89 | before_enqueued_at: datetime | None = None,
90 | after_enqueued_at: datetime | None = None,
91 | before_started_at: datetime | None = None,
92 | after_finished_at: datetime | None = None,
93 | ) -> BatchStatus:
94 | client_ = get_client(client)
95 | params = _build_parameters(
96 | uids=uids,
97 | batch_uids=batch_uids,
98 | index_uids=index_uids,
99 | statuses=statuses,
100 | types=types,
101 | limit=limit,
102 | from_=from_,
103 | reverse=reverse,
104 | before_enqueued_at=before_enqueued_at,
105 | after_enqueued_at=after_enqueued_at,
106 | before_started_at=before_started_at,
107 | after_finished_at=after_finished_at,
108 | )
109 |
110 | response = client_.get("batches", params=params)
111 |
112 | return BatchStatus(**response.json())
113 |
114 |
115 | def _build_parameters(
116 | *,
117 | uids: list[int] | None = None,
118 | batch_uids: list[int] | None = None,
119 | index_uids: list[int] | None = None,
120 | statuses: list[str] | None = None,
121 | types: list[str] | None = None,
122 | limit: int = 20,
123 | from_: str | None = None,
124 | reverse: bool = False,
125 | before_enqueued_at: datetime | None = None,
126 | after_enqueued_at: datetime | None = None,
127 | before_started_at: datetime | None = None,
128 | after_finished_at: datetime | None = None,
129 | ) -> dict[str, str]:
130 | params = {}
131 |
132 | if uids:
133 | params["uids"] = ",".join([str(uid) for uid in uids])
134 |
135 | if batch_uids: # pragma: no cover
136 | params["batchUids"] = ",".join([str(uid) for uid in batch_uids])
137 |
138 | if index_uids: # pragma: no cover
139 | params["indexUids"] = ",".join([str(uid) for uid in index_uids])
140 |
141 | if statuses: # pragma: no cover
142 | params["statuses"] = ",".join(statuses)
143 |
144 | if types: # pragma: no cover
145 | params["types"] = ",".join(types)
146 |
147 | params["limit"] = str(limit)
148 |
149 | if from_: # pragma: no cover
150 | params["from"] = from_
151 |
152 | params["reverse"] = "true" if reverse else "false"
153 |
154 | if before_enqueued_at: # pragma: no cover
155 | params["beforeEnqueuedAt"] = before_enqueued_at.isoformat()
156 |
157 | if after_enqueued_at: # pragma: no cover
158 | params["afterEnqueuedAt"] = after_enqueued_at.isoformat()
159 |
160 | if before_started_at: # pragma: no cover
161 | params["beforeStartedAt"] = before_started_at.isoformat()
162 |
163 | if after_finished_at: # pragma: no cover
164 | params["afterFinishedAt"] = after_finished_at.isoformat()
165 |
166 | return params
167 |
--------------------------------------------------------------------------------
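A minimal sketch of calling the synchronous helpers above directly, assuming a Meilisearch server at the default address; `get_batches` and `get_batch` accept either an SDK `Client` or a pre-configured httpx `Client`:

    from meilisearch_python_sdk import Client
    from meilisearch_python_sdk._batch import get_batch, get_batches

    client = Client("http://127.0.0.1:7700", "masterKey")
    status = get_batches(client, statuses=["succeeded"], limit=5)
    for batch in status.results:
        print(batch.uid, batch.stats.total_nb_tasks)
    if status.results:
        # Fetch a single batch by uid; raises BatchNotFoundError on a 404
        print(get_batch(client, status.results[0].uid))
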
/meilisearch_python_sdk/_http_requests.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import gzip
4 | from functools import lru_cache
5 | from typing import Any, Callable
6 |
7 | from httpx import (
8 | AsyncClient,
9 | Client,
10 | ConnectError,
11 | ConnectTimeout,
12 | HTTPError,
13 | RemoteProtocolError,
14 | Response,
15 | )
16 |
17 | from meilisearch_python_sdk._version import VERSION
18 | from meilisearch_python_sdk.errors import (
19 | MeilisearchApiError,
20 | MeilisearchCommunicationError,
21 | MeilisearchError,
22 | )
23 | from meilisearch_python_sdk.json_handler import BuiltinHandler, OrjsonHandler, UjsonHandler
24 |
25 |
26 | class AsyncHttpRequests:
27 | def __init__(
28 | self, http_client: AsyncClient, json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler
29 | ) -> None:
30 | self.http_client = http_client
31 | self.json_handler = json_handler
32 |
33 | async def _send_request(
34 | self,
35 | http_method: Callable,
36 | path: str,
37 | body: Any | None = None,
38 | content_type: str = "application/json",
39 | compress: bool = False,
40 | ) -> Response:
41 | headers = build_headers(content_type, compress)
42 |
43 | try:
44 | if body is None:
45 | response = await http_method(path)
46 | elif content_type == "application/json" and not compress:
47 | response = await http_method(
48 | path, content=self.json_handler.dumps(body), headers=headers
49 | )
50 | else:
51 | if body and compress:
52 | if content_type == "application/json":
53 | body = gzip.compress(self.json_handler.dumps(body).encode("utf-8"))
54 | else:
55 | body = gzip.compress((body).encode("utf-8"))
56 | response = await http_method(path, content=body, headers=headers)
57 |
58 | response.raise_for_status()
59 | return response
60 |
61 | except (ConnectError, ConnectTimeout, RemoteProtocolError) as err:
62 | raise MeilisearchCommunicationError(str(err)) from err
63 | except HTTPError as err:
64 | if "response" in locals():
65 | if "application/json" in response.headers.get("content-type", ""):
66 | raise MeilisearchApiError(str(err), response) from err
67 | else:
68 | raise
69 | else:
70 | # Fail safe just in case error happens before response is created
71 | raise MeilisearchError(str(err)) from err # pragma: no cover
72 |
73 | async def get(self, path: str) -> Response:
74 | return await self._send_request(self.http_client.get, path)
75 |
76 | async def patch(
77 | self,
78 | path: str,
79 | body: Any | None = None,
80 | content_type: str = "application/json",
81 | compress: bool = False,
82 | ) -> Response:
83 | return await self._send_request(self.http_client.patch, path, body, content_type, compress)
84 |
85 | async def post(
86 | self,
87 | path: str,
88 | body: Any | None = None,
89 | content_type: str = "application/json",
90 | compress: bool = False,
91 | ) -> Response:
92 | return await self._send_request(self.http_client.post, path, body, content_type, compress)
93 |
94 | async def put(
95 | self,
96 | path: str,
97 | body: Any | None = None,
98 | content_type: str = "application/json",
99 | compress: bool = False,
100 | ) -> Response:
101 | return await self._send_request(self.http_client.put, path, body, content_type, compress)
102 |
103 | async def delete(self, path: str, body: dict | None = None) -> Response:
104 | return await self._send_request(self.http_client.delete, path, body)
105 |
106 |
107 | class HttpRequests:
108 | def __init__(
109 | self, http_client: Client, json_handler: BuiltinHandler | OrjsonHandler | UjsonHandler
110 | ) -> None:
111 | self.http_client = http_client
112 | self.json_handler = json_handler
113 |
114 | def _send_request(
115 | self,
116 | http_method: Callable,
117 | path: str,
118 | body: Any | None = None,
119 | content_type: str = "application/json",
120 | compress: bool = False,
121 | ) -> Response:
122 | headers = build_headers(content_type, compress)
123 | try:
124 | if body is None:
125 | response = http_method(path)
126 | elif content_type == "application/json" and not compress:
127 | response = http_method(path, content=self.json_handler.dumps(body), headers=headers)
128 | else:
129 | if body and compress:
130 | if content_type == "application/json":
131 | body = gzip.compress(self.json_handler.dumps(body).encode("utf-8"))
132 | else:
133 | body = gzip.compress((body).encode("utf-8"))
134 | response = http_method(path, content=body, headers=headers)
135 |
136 | response.raise_for_status()
137 | return response
138 |
139 | except (ConnectError, ConnectTimeout, RemoteProtocolError) as err:
140 | raise MeilisearchCommunicationError(str(err)) from err
141 | except HTTPError as err:
142 | if "response" in locals():
143 | if "application/json" in response.headers.get("content-type", ""):
144 | raise MeilisearchApiError(str(err), response) from err
145 | else:
146 | raise
147 | else:
148 | # Fail safe just in case error happens before response is created
149 | raise MeilisearchError(str(err)) from err # pragma: no cover
150 |
151 | def get(self, path: str) -> Response:
152 | return self._send_request(self.http_client.get, path)
153 |
154 | def patch(
155 | self,
156 | path: str,
157 | body: Any | None = None,
158 | content_type: str = "application/json",
159 | compress: bool = False,
160 | ) -> Response:
161 | return self._send_request(self.http_client.patch, path, body, content_type, compress)
162 |
163 | def post(
164 | self,
165 | path: str,
166 | body: Any | None = None,
167 | content_type: str = "application/json",
168 | compress: bool = False,
169 | ) -> Response:
170 | return self._send_request(self.http_client.post, path, body, content_type, compress)
171 |
172 | def put(
173 | self,
174 | path: str,
175 | body: Any | None = None,
176 | content_type: str = "application/json",
177 | compress: bool = False,
178 | ) -> Response:
179 | return self._send_request(self.http_client.put, path, body, content_type, compress)
180 |
181 | def delete(self, path: str, body: dict | None = None) -> Response:
182 | return self._send_request(self.http_client.delete, path, body)
183 |
184 |
185 | def build_headers(content_type: str, compress: bool) -> dict[str, str]:
186 | headers = {"user-agent": user_agent(), "Content-Type": content_type}
187 |
188 | if compress:
189 | headers["Content-Encoding"] = "gzip"
190 |
191 | return headers
192 |
193 |
194 | @lru_cache(maxsize=1)
195 | def user_agent() -> str:
196 | return f"Meilisearch Python SDK (v{VERSION})"
197 |
--------------------------------------------------------------------------------
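The request headers above are a pure function of content type and compression flag, so they are easy to check in isolation. A small sketch of what `build_headers` returns (the version in the user-agent comes from `_version.VERSION`):

    from meilisearch_python_sdk._http_requests import build_headers

    # JSON without compression: user-agent plus Content-Type only
    print(build_headers("application/json", compress=False))
    # {'user-agent': 'Meilisearch Python SDK (v4.6.0)', 'Content-Type': 'application/json'}

    # A compressed body additionally advertises gzip encoding to the server
    print(build_headers("text/csv", compress=True))
    # {'user-agent': ..., 'Content-Type': 'text/csv', 'Content-Encoding': 'gzip'}
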
/meilisearch_python_sdk/_task.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import time
5 | from datetime import datetime
6 | from typing import TYPE_CHECKING
7 | from urllib.parse import urlencode
8 |
9 | from httpx import AsyncClient as HttpxAsyncClient
10 | from httpx import Client as HttpxClient
11 |
12 | from meilisearch_python_sdk._http_requests import AsyncHttpRequests, HttpRequests
13 | from meilisearch_python_sdk._utils import get_async_client, get_client
14 | from meilisearch_python_sdk.errors import MeilisearchTaskFailedError, MeilisearchTimeoutError
15 | from meilisearch_python_sdk.json_handler import BuiltinHandler, OrjsonHandler, UjsonHandler
16 | from meilisearch_python_sdk.models.task import TaskInfo, TaskResult, TaskStatus
17 |
18 | if TYPE_CHECKING:
19 | from meilisearch_python_sdk._client import AsyncClient, Client # pragma: no cover
20 |
21 |
22 | async def async_cancel_tasks(
23 | client: HttpxAsyncClient | AsyncClient,
24 | *,
25 | uids: list[int] | None = None,
26 | index_uids: list[int] | None = None,
27 | statuses: list[str] | None = None,
28 | types: list[str] | None = None,
29 | before_enqueued_at: datetime | None = None,
30 | after_enqueued_at: datetime | None = None,
31 | before_started_at: datetime | None = None,
32 | after_finished_at: datetime | None = None,
33 | ) -> TaskInfo:
34 | """Cancel a list of enqueued or processing tasks.
35 |
36 | Defaults to cancelling all tasks.
37 |
38 | Args:
39 |         client: An httpx AsyncClient or meilisearch_python_sdk AsyncClient instance.
40 | uids: A list of task UIDs to cancel.
41 | index_uids: A list of index UIDs for which to cancel tasks.
42 | statuses: A list of statuses to cancel.
43 | types: A list of types to cancel.
44 | before_enqueued_at: Cancel tasks that were enqueued before the specified date time.
45 | after_enqueued_at: Cancel tasks that were enqueued after the specified date time.
46 | before_started_at: Cancel tasks that were started before the specified date time.
47 | after_finished_at: Cancel tasks that were finished after the specified date time.
48 |
49 | Returns:
50 | The details of the task
51 |
52 | Raises:
53 | MeilisearchCommunicationError: If there was an error communicating with the server.
54 | MeilisearchApiError: If the Meilisearch API returned an error.
55 | MeilisearchTimeoutError: If the connection times out.
56 |
57 |     Examples:
58 |         >>> from meilisearch_python_sdk import AsyncClient
59 |         >>> from meilisearch_python_sdk._task import async_cancel_tasks
60 |         >>>
61 |         >>> async with AsyncClient("http://localhost:7700", "masterKey") as client:
62 |         >>>     await async_cancel_tasks(client, uids=[1, 2])
63 | """
64 | parameters = _process_params(
65 | uids,
66 | index_uids,
67 | statuses,
68 | types,
69 | before_enqueued_at,
70 | after_enqueued_at,
71 | before_started_at,
72 | after_finished_at,
73 | )
74 |
75 | if not parameters:
76 |         # Cancel all tasks if no parameters provided
77 | parameters["statuses"] = "enqueued,processing"
78 |
79 | url = f"tasks/cancel?{urlencode(parameters)}"
80 | client_ = get_async_client(client)
81 | response = await client_.post(url)
82 |
83 | return TaskInfo(**response.json())
84 |
85 |
86 | async def async_delete_tasks(
87 | client: HttpxAsyncClient | AsyncClient,
88 | *,
89 | uids: list[int] | None = None,
90 | index_uids: list[int] | None = None,
91 | statuses: list[str] | None = None,
92 | types: list[str] | None = None,
93 | before_enqueued_at: datetime | None = None,
94 | after_enqueued_at: datetime | None = None,
95 | before_started_at: datetime | None = None,
96 | after_finished_at: datetime | None = None,
97 | ) -> TaskInfo:
98 | parameters = _process_params(
99 | uids,
100 | index_uids,
101 | statuses,
102 | types,
103 | before_enqueued_at,
104 | after_enqueued_at,
105 | before_started_at,
106 | after_finished_at,
107 | )
108 |
109 | if not parameters:
110 |         # delete all tasks if no parameters provided
111 | parameters["statuses"] = "canceled,enqueued,failed,processing,succeeded"
112 |
113 | url = f"tasks?{urlencode(parameters)}"
114 | client_ = get_async_client(client)
115 | response = await client_.delete(url)
116 |
117 | return TaskInfo(**response.json())
118 |
119 |
120 | async def async_get_task(
121 | client: HttpxAsyncClient | AsyncClient,
122 | task_id: int,
123 | ) -> TaskResult:
124 | client_ = get_async_client(client)
125 | response = await client_.get(f"tasks/{task_id}")
126 |
127 | return TaskResult(**response.json())
128 |
129 |
130 | async def async_get_tasks(
131 | client: HttpxAsyncClient | AsyncClient,
132 | *,
133 | index_ids: list[str] | None = None,
134 | types: str | list[str] | None = None,
135 | reverse: bool | None = None,
136 | ) -> TaskStatus:
137 | url = f"tasks?indexUids={','.join(index_ids)}" if index_ids else "tasks"
138 | if types:
139 | formatted_types = ",".join(types) if isinstance(types, list) else types
140 | url = f"{url}&types={formatted_types}" if "?" in url else f"{url}?types={formatted_types}"
141 | if reverse:
142 | url = (
143 | f"{url}&reverse={str(reverse).lower()}"
144 | if "?" in url
145 | else f"{url}?reverse={str(reverse).lower()}"
146 | )
147 | client_ = get_async_client(client)
148 | response = await client_.get(url)
149 |
150 | return TaskStatus(**response.json())
151 |
152 |
153 | async def async_wait_for_task(
154 | client: HttpxAsyncClient | AsyncClient,
155 | task_id: int,
156 | *,
157 | timeout_in_ms: int | None = 5000,
158 | interval_in_ms: int = 50,
159 | raise_for_status: bool = False,
160 | ) -> TaskResult:
161 | client_ = get_async_client(client)
162 | handler = _get_json_handler(client)
163 | url = f"tasks/{task_id}"
164 | http_requests = AsyncHttpRequests(client_, handler)
165 | start_time = datetime.now()
166 | elapsed_time = 0.0
167 |
168 | if timeout_in_ms:
169 | while elapsed_time < timeout_in_ms:
170 | response = await http_requests.get(url)
171 | status = TaskResult(**response.json())
172 | if status.status in ("succeeded", "failed"):
173 | if raise_for_status and status.status == "failed":
174 | raise MeilisearchTaskFailedError(f"Task {task_id} failed")
175 | return status
176 | await asyncio.sleep(interval_in_ms / 1000)
177 | time_delta = datetime.now() - start_time
178 | elapsed_time = time_delta.seconds * 1000 + time_delta.microseconds / 1000
179 | raise MeilisearchTimeoutError(
180 |             f"timeout of {timeout_in_ms}ms has been exceeded on task {task_id} while waiting for the pending update to resolve."
181 | )
182 | else:
183 | while True:
184 | response = await http_requests.get(url)
185 | status = TaskResult(**response.json())
186 | if status.status in ("succeeded", "failed"):
187 | if raise_for_status and status.status == "failed":
188 | raise MeilisearchTaskFailedError(f"Task {task_id} failed")
189 | return status
190 | await asyncio.sleep(interval_in_ms / 1000)
191 |
192 |
193 | def cancel_tasks(
194 | client: HttpxClient | Client,
195 | *,
196 | uids: list[int] | None = None,
197 | index_uids: list[int] | None = None,
198 | statuses: list[str] | None = None,
199 | types: list[str] | None = None,
200 | before_enqueued_at: datetime | None = None,
201 | after_enqueued_at: datetime | None = None,
202 | before_started_at: datetime | None = None,
203 | after_finished_at: datetime | None = None,
204 | ) -> TaskInfo:
205 | parameters = _process_params(
206 | uids,
207 | index_uids,
208 | statuses,
209 | types,
210 | before_enqueued_at,
211 | after_enqueued_at,
212 | before_started_at,
213 | after_finished_at,
214 | )
215 |
216 | if not parameters:
217 |         # Cancel all tasks if no parameters provided
218 | parameters["statuses"] = "enqueued,processing"
219 |
220 | url = f"tasks/cancel?{urlencode(parameters)}"
221 | client_ = get_client(client)
222 | response = client_.post(url)
223 |
224 | return TaskInfo(**response.json())
225 |
226 |
227 | def delete_tasks(
228 | client: HttpxClient | Client,
229 | *,
230 | uids: list[int] | None = None,
231 | index_uids: list[int] | None = None,
232 | statuses: list[str] | None = None,
233 | types: list[str] | None = None,
234 | before_enqueued_at: datetime | None = None,
235 | after_enqueued_at: datetime | None = None,
236 | before_started_at: datetime | None = None,
237 | after_finished_at: datetime | None = None,
238 | ) -> TaskInfo:
239 | parameters = _process_params(
240 | uids,
241 | index_uids,
242 | statuses,
243 | types,
244 | before_enqueued_at,
245 | after_enqueued_at,
246 | before_started_at,
247 | after_finished_at,
248 | )
249 |
250 | if not parameters:
251 |         # delete all tasks if no parameters provided
252 | parameters["statuses"] = "canceled,enqueued,failed,processing,succeeded"
253 |
254 | url = f"tasks?{urlencode(parameters)}"
255 | client_ = get_client(client)
256 | response = client_.delete(url)
257 |
258 | return TaskInfo(**response.json())
259 |
260 |
261 | def get_task(client: HttpxClient | Client, task_id: int) -> TaskResult:
262 | client_ = get_client(client)
263 | response = client_.get(f"tasks/{task_id}")
264 |
265 | return TaskResult(**response.json())
266 |
267 |
268 | def get_tasks(
269 | client: HttpxClient | Client,
270 | *,
271 | index_ids: list[str] | None = None,
272 | types: str | list[str] | None = None,
273 | reverse: bool | None = None,
274 | ) -> TaskStatus:
275 | url = f"tasks?indexUids={','.join(index_ids)}" if index_ids else "tasks"
276 | if types:
277 | formatted_types = ",".join(types) if isinstance(types, list) else types
278 | url = f"{url}&types={formatted_types}" if "?" in url else f"{url}?types={formatted_types}"
279 | if reverse:
280 | url = (
281 | f"{url}&reverse={str(reverse).lower()}"
282 | if "?" in url
283 | else f"{url}?reverse={str(reverse).lower()}"
284 | )
285 | client_ = get_client(client)
286 | response = client_.get(url)
287 |
288 | return TaskStatus(**response.json())
289 |
290 |
291 | def wait_for_task(
292 | client: HttpxClient | Client,
293 | task_id: int,
294 | *,
295 | timeout_in_ms: int | None = 5000,
296 | interval_in_ms: int = 50,
297 | raise_for_status: bool = False,
298 | ) -> TaskResult:
299 | client_ = get_client(client)
300 | handler = _get_json_handler(client)
301 | url = f"tasks/{task_id}"
302 | http_requests = HttpRequests(client_, json_handler=handler)
303 | start_time = datetime.now()
304 | elapsed_time = 0.0
305 |
306 | if timeout_in_ms:
307 | while elapsed_time < timeout_in_ms:
308 | response = http_requests.get(url)
309 | status = TaskResult(**response.json())
310 | if status.status in ("succeeded", "failed"):
311 | if raise_for_status and status.status == "failed":
312 | raise MeilisearchTaskFailedError(f"Task {task_id} failed")
313 | return status
314 | time.sleep(interval_in_ms / 1000)
315 | time_delta = datetime.now() - start_time
316 | elapsed_time = time_delta.seconds * 1000 + time_delta.microseconds / 1000
317 | raise MeilisearchTimeoutError(
318 |             f"timeout of {timeout_in_ms}ms has been exceeded on task {task_id} while waiting for the pending update to resolve."
319 | )
320 | else:
321 | while True:
322 | response = http_requests.get(url)
323 | status = TaskResult(**response.json())
324 | if status.status in ("succeeded", "failed"):
325 | if raise_for_status and status.status == "failed":
326 | raise MeilisearchTaskFailedError(f"Task {task_id} failed")
327 | return status
328 | time.sleep(interval_in_ms / 1000)
329 |
330 |
331 | def _get_json_handler(
332 | client: AsyncClient | Client | HttpxAsyncClient | HttpxClient,
333 | ) -> BuiltinHandler | OrjsonHandler | UjsonHandler:
334 | if isinstance(client, (HttpxAsyncClient, HttpxClient)):
335 | return BuiltinHandler()
336 |
337 | return client.json_handler
338 |
339 |
340 | def _process_params(
341 | uids: list[int] | None = None,
342 | index_uids: list[int] | None = None,
343 | statuses: list[str] | None = None,
344 | types: list[str] | None = None,
345 | before_enqueued_at: datetime | None = None,
346 | after_enqueued_at: datetime | None = None,
347 | before_started_at: datetime | None = None,
348 | after_finished_at: datetime | None = None,
349 | ) -> dict[str, str]:
350 | parameters = {}
351 | if uids:
352 | parameters["uids"] = ",".join([str(x) for x in uids])
353 | if index_uids:
354 | parameters["indexUids"] = ",".join([str(x) for x in index_uids])
355 | if statuses:
356 | parameters["statuses"] = ",".join(statuses)
357 | if types:
358 | parameters["types"] = ",".join(types)
359 | if before_enqueued_at:
360 | parameters["beforeEnqueuedAt"] = f"{before_enqueued_at.isoformat()}Z"
361 | if after_enqueued_at:
362 | parameters["afterEnqueuedAt"] = f"{after_enqueued_at.isoformat()}Z"
363 | if before_started_at:
364 | parameters["beforeStartedAt"] = f"{before_started_at.isoformat()}Z"
365 | if after_finished_at:
366 | parameters["afterFinishedAt"] = f"{after_finished_at.isoformat()}Z"
367 |
368 | return parameters
369 |
--------------------------------------------------------------------------------
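A minimal sketch of the synchronous helpers above driven by a plain httpx client; the base_url and bearer-token auth are assumptions about server configuration, mirroring how the SDK sets up its own clients:

    import httpx

    from meilisearch_python_sdk._task import get_tasks, wait_for_task

    with httpx.Client(
        base_url="http://127.0.0.1:7700",
        headers={"Authorization": "Bearer masterKey"},
    ) as client:
        tasks = get_tasks(client, types="documentAdditionOrUpdate")
        if tasks.results:
            # Poll until the newest matching task settles; raise if it failed
            result = wait_for_task(client, tasks.results[0].uid, raise_for_status=True)
            print(result.status)
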
/meilisearch_python_sdk/_utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from datetime import datetime
5 | from functools import lru_cache
6 | from typing import TYPE_CHECKING
7 |
8 | from httpx import AsyncClient as HttpxAsyncClient
9 | from httpx import Client as HttpxClient
10 |
11 | if TYPE_CHECKING:
12 | from meilisearch_python_sdk._client import AsyncClient, Client # pragma: no cover
13 |
14 |
15 | def get_async_client(
16 | client: AsyncClient | HttpxAsyncClient,
17 | ) -> HttpxAsyncClient:
18 | if isinstance(client, HttpxAsyncClient):
19 | return client
20 |
21 | return client.http_client
22 |
23 |
24 | def get_client(
25 | client: Client | HttpxClient,
26 | ) -> HttpxClient:
27 | if isinstance(client, HttpxClient):
28 | return client
29 |
30 | return client.http_client
31 |
32 |
33 | def iso_to_date_time(iso_date: datetime | str | None) -> datetime | None:
34 | """Handle conversion of iso string to datetime.
35 |
36 | The microseconds from Meilisearch are sometimes too long for python to convert so this
37 | strips off the last digits to shorten it when that happens.
38 | """
39 | if not iso_date:
40 | return None
41 |
42 | if isinstance(iso_date, datetime):
43 | return iso_date
44 |
45 | try:
46 | return datetime.strptime(iso_date, "%Y-%m-%dT%H:%M:%S.%fZ")
47 | except ValueError:
48 | split = iso_date.split(".")
49 | if len(split) < 2:
50 | raise
51 | reduce = len(split[1]) - 6
52 | reduced = f"{split[0]}.{split[1][:-reduce]}Z"
53 | return datetime.strptime(reduced, "%Y-%m-%dT%H:%M:%S.%fZ")
54 |
55 |
56 | @lru_cache(maxsize=1)
57 | def use_task_groups() -> bool:
58 |     return sys.version_info >= (3, 11)
59 |
--------------------------------------------------------------------------------
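The truncation branch in `iso_to_date_time` above is the subtle part: Meilisearch can return nine fractional-second digits, which `%f` cannot parse, so everything past six digits is dropped before retrying. A quick sketch:

    from meilisearch_python_sdk._utils import iso_to_date_time

    # Six fractional digits parse directly
    print(iso_to_date_time("2024-01-01T12:00:00.123456Z"))
    # Nine digits fail the first strptime, so the helper trims to six and retries;
    # both calls return datetime(2024, 1, 1, 12, 0, 0, 123456)
    print(iso_to_date_time("2024-01-01T12:00:00.123456789Z"))
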
/meilisearch_python_sdk/_version.py:
--------------------------------------------------------------------------------
1 | VERSION = "4.6.0"
2 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/decorators.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | from functools import wraps
5 | from typing import Any, Callable, NamedTuple
6 |
7 | from meilisearch_python_sdk import AsyncClient, Client
8 | from meilisearch_python_sdk._utils import use_task_groups
9 |
10 |
11 | class ConnectionInfo(NamedTuple):
12 |     """Information on how to connect to Meilisearch.
13 |
14 | url: URL for the Meilisearch server.
15 | api_key: The API key for the server.
16 | """
17 |
18 | url: str
19 | api_key: str
20 |
21 |
22 | def async_add_documents(
23 | *,
24 | index_name: str,
25 | connection_info: AsyncClient | ConnectionInfo,
26 | batch_size: int | None = None,
27 | primary_key: str | None = None,
28 | wait_for_task: bool = False,
29 | verify: bool = True,
30 | ) -> Callable:
31 |     """Decorator that takes the returned documents from a function and asynchronously adds them to Meilisearch.
32 |
33 |     The connection_info argument accepts either an AsyncClient instance or ConnectionInfo.
34 |
35 | Args:
36 | index_name: The name of the index to which the documents should be added.
37 |         connection_info: Either an AsyncClient instance or ConnectionInfo with information on how to
38 | connect to Meilisearch.
39 | batch_size: If provided the documents will be sent in batches of the specified size.
40 | Otherwise all documents are sent at once. Default = None.
41 | primary_key: The primary key of the documents. This will be ignored if already set.
42 | Defaults to None.
43 | wait_for_task: If set to `True` the decorator will wait for the document addition to finish
44 | indexing before returning, otherwise it will return right away. Default = False.
45 | verify: If set to `False` the decorator will not verify the SSL certificate of the server.
46 |
47 | Returns:
48 |         The list of documents provided by the decorated function.
49 |
50 | Raises:
51 | MeilisearchCommunicationError: If there was an error communicating with the server.
52 | MeilisearchApiError: If the Meilisearch API returned an error.
53 |         ValueError: If neither an AsyncClient nor ConnectionInfo is provided.
54 |
55 |     Examples:
56 | >>> from meilisearch_python_sdk import AsyncClient
57 | >>> from meilisearch_python_sdk.decorators import async_add_documents, ConnectionInfo
58 | >>>
59 | >>>
60 | >>> # with `AsyncClient`
61 | >>> client = AsyncClient(url="http://localhost:7700", api_key="masterKey")
62 | >>> @async_add_documents(index_name="movies", connection_info=client)
63 | >>> async def my_function() -> list[dict[str, Any]]:
64 | >>> return [{"id": 1, "title": "Test 1"}, {"id": 2, "title": "Test 2"}]
65 | >>>
66 | >>> # with `ConnectionInfo`
67 | >>> @async_add_documents(
68 | index_name="movies",
69 | connection_info=ConnectionInfo(url="http://localhost:7700", api_key="masterKey"),
70 | )
71 | >>> async def my_function() -> list[dict[str, Any]]:
72 | >>> return [{"id": 1, "title": "Test 1"}, {"id": 2, "title": "Test 2"}]
73 | """
74 |
75 | def decorator(func: Callable) -> Callable:
76 | @wraps(func)
77 | async def wrapper(*args: Any, **kwargs: Any) -> Any:
78 | result = await func(*args, **kwargs)
79 | if isinstance(connection_info, AsyncClient):
80 | await _async_add_documents(
81 | connection_info,
82 | index_name,
83 | result,
84 | batch_size,
85 | primary_key,
86 | wait_for_task,
87 | )
88 | return result
89 |
90 | async with AsyncClient(
91 | connection_info.url, connection_info.api_key, verify=verify
92 | ) as client:
93 | await _async_add_documents(
94 | client, index_name, result, batch_size, primary_key, wait_for_task
95 | )
96 |
97 | return result
98 |
99 | return wrapper
100 |
101 | return decorator
102 |
103 |
104 | def add_documents(
105 | *,
106 | index_name: str,
107 | connection_info: Client | ConnectionInfo,
108 | batch_size: int | None = None,
109 | primary_key: str | None = None,
110 | wait_for_task: bool = False,
111 | verify: bool = True,
112 | ) -> Callable:
113 | """Decorator that takes the returned documents from a function and adds them to Meilisearch.
114 |
115 |     The connection_info argument accepts either a Client instance or ConnectionInfo.
116 |
117 | Args:
118 | index_name: The name of the index to which the documents should be added.
119 |         connection_info: Either a Client instance or ConnectionInfo with information on how to
120 | connect to Meilisearch.
121 | batch_size: If provided the documents will be sent in batches of the specified size.
122 | Otherwise all documents are sent at once. Default = None.
123 | primary_key: The primary key of the documents. This will be ignored if already set.
124 | Defaults to None.
125 | wait_for_task: If set to `True` the decorator will wait for the document addition to finish
126 | indexing before returning, otherwise it will return right away. Default = False.
127 | verify: If set to `False` the decorator will not verify the SSL certificate of the server.
128 |
129 | Returns:
130 |         The list of documents provided by the decorated function.
131 |
132 | Raises:
133 | MeilisearchCommunicationError: If there was an error communicating with the server.
134 | MeilisearchApiError: If the Meilisearch API returned an error.
135 |         ValueError: If neither a Client nor ConnectionInfo is provided.
136 |
137 |     Examples:
138 | >>> from meilisearch_python_sdk import Client
139 | >>> from meilisearch_python_sdk.decorators import add_documents, ConnectionInfo
140 | >>>
141 | >>>
142 | >>> # With `Client`
143 | >>> client = Client(url="http://localhost:7700", api_key="masterKey")
144 | >>> @add_documents(index_name="movies", connection_info=client)
145 | >>> def my_function() -> list[dict[str, Any]]:
146 | >>> return [{"id": 1, "title": "Test 1"}, {"id": 2, "title": "Test 2"}]
147 | >>>
148 | >>> # With `ConnectionInfo`
149 | >>> @add_documents(
150 | index_name="movies",
151 | connection_info=ConnectionInfo(url="http://localhost:7700", api_key="masterKey"),
152 | )
153 | >>> def my_function() -> list[dict[str, Any]]:
154 | >>> return [{"id": 1, "title": "Test 1"}, {"id": 2, "title": "Test 2"}]
155 | """
156 |
157 | def decorator(func: Callable) -> Callable:
158 | @wraps(func)
159 | def wrapper(*args: Any, **kwargs: Any) -> Any:
160 | result = func(*args, **kwargs)
161 | if isinstance(connection_info, Client):
162 | _add_documents(
163 | connection_info,
164 | index_name,
165 | result,
166 | batch_size,
167 | primary_key,
168 | wait_for_task,
169 | )
170 | return result
171 |
172 | decorator_client = Client(
173 | url=connection_info.url, api_key=connection_info.api_key, verify=verify
174 | )
175 | _add_documents(
176 | decorator_client,
177 | index_name,
178 | result,
179 | batch_size,
180 | primary_key,
181 | wait_for_task,
182 | )
183 |
184 | return result
185 |
186 | return wrapper
187 |
188 | return decorator
189 |
190 |
191 | async def _async_add_documents(
192 | async_client: AsyncClient,
193 | index_name: str,
194 | documents: Any,
195 | batch_size: int | None,
196 | primary_key: str | None,
197 | wait_for_task: bool,
198 | ) -> None:
199 | index = async_client.index(index_name)
200 | if not batch_size:
201 | task = await index.add_documents(documents, primary_key)
202 | if wait_for_task:
203 | await async_client.wait_for_task(task.task_uid, timeout_in_ms=None)
204 | return
205 |
206 | tasks = await index.add_documents_in_batches(
207 | documents, batch_size=batch_size, primary_key=primary_key
208 | )
209 |
210 | if wait_for_task:
211 | if not use_task_groups():
212 | waits = [async_client.wait_for_task(x.task_uid) for x in tasks]
213 | await asyncio.gather(*waits)
214 | return
215 |
216 | async with asyncio.TaskGroup() as tg: # type: ignore[attr-defined]
217 | [tg.create_task(async_client.wait_for_task(x.task_uid)) for x in tasks]
218 |
219 |
220 | def _add_documents(
221 | client: Client,
222 | index_name: str,
223 | documents: Any,
224 | batch_size: int | None,
225 | primary_key: str | None,
226 | wait_for_task: bool,
227 | ) -> None:
228 | index = client.index(index_name)
229 | if not batch_size:
230 | task = index.add_documents(documents, primary_key)
231 | if wait_for_task:
232 | client.wait_for_task(task.task_uid, timeout_in_ms=None)
233 | return
234 |
235 | tasks = index.add_documents_in_batches(
236 | documents, batch_size=batch_size, primary_key=primary_key
237 | )
238 |
239 | if wait_for_task:
240 | for task in tasks:
241 | client.wait_for_task(task.task_uid)
242 |
--------------------------------------------------------------------------------
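A minimal sketch combining the `batch_size` and `wait_for_task` options documented above (hypothetical index name and document shape; assumes a server at the default address):

    from typing import Any

    from meilisearch_python_sdk.decorators import ConnectionInfo, add_documents

    @add_documents(
        index_name="movies",
        connection_info=ConnectionInfo(url="http://127.0.0.1:7700", api_key="masterKey"),
        batch_size=500,  # send the documents in batches of 500
        wait_for_task=True,  # block until every batch has finished indexing
    )
    def load_movies() -> list[dict[str, Any]]:
        return [{"id": i, "title": f"Movie {i}"} for i in range(2000)]

    documents = load_movies()  # the decorator returns the original documents
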
/meilisearch_python_sdk/errors.py:
--------------------------------------------------------------------------------
1 | from httpx import Response
2 |
3 |
4 | class BatchNotFoundError(Exception):
5 | pass
6 |
7 |
8 | class InvalidDocumentError(Exception):
9 | """Error for documents that are not in a valid format for Meilisearch."""
10 |
11 | pass
12 |
13 |
14 | class InvalidRestriction(Exception):
15 | pass
16 |
17 |
18 | class MeilisearchError(Exception):
19 | """Generic class for Meilisearch error handling."""
20 |
21 | def __init__(self, message: str) -> None:
22 | self.message = message
23 | super().__init__(self.message)
24 |
25 | def __str__(self) -> str:
26 | return f"MeilisearchError. Error message: {self.message}."
27 |
28 |
29 | class MeilisearchApiError(MeilisearchError):
30 | """Error sent by Meilisearch API."""
31 |
32 | def __init__(self, error: str, response: Response) -> None:
33 | self.status_code = response.status_code
34 | self.code = ""
35 | self.message = ""
36 | self.link = ""
37 | self.error_type = ""
38 | if response.content:
39 | self.message = f"Error message: {response.json().get('message') or ''}"
40 | self.code = f"{response.json().get('code') or ''}"
41 | self.error_type = f"{response.json().get('type') or ''}"
42 | self.link = f"Error documentation: {response.json().get('link') or ''}"
43 | else:
44 | self.message = error
45 | super().__init__(self.message)
46 |
47 | def __str__(self) -> str:
48 | return f"MeilisearchApiError.{self.code} {self.message} {self.error_type} {self.link}"
49 |
50 |
51 | class MeilisearchCommunicationError(MeilisearchError):
52 | """Error when connecting to Meilisearch."""
53 |
54 | def __str__(self) -> str:
55 | return f"MeilisearchCommunicationError, {self.message}"
56 |
57 |
58 | class MeilisearchTaskFailedError(MeilisearchError):
59 | """Error when a task is in the failed status."""
60 |
61 | def __str__(self) -> str:
62 | return f"MeilisearchTaskFailedError, {self.message}"
63 |
64 |
65 | class MeilisearchTimeoutError(MeilisearchError):
66 | """Error when Meilisearch operation takes longer than expected."""
67 |
68 | def __str__(self) -> str:
69 | return f"MeilisearchTimeoutError, {self.message}"
70 |
71 |
72 | class PayloadTooLarge(Exception):
73 | """Error when the payload is larger than the allowed payload size."""
74 |
75 | pass
76 |
--------------------------------------------------------------------------------
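A minimal sketch of handling `MeilisearchApiError` above, which parses the server's JSON error body into `status_code`, `code`, `error_type`, and `link`; the `get_index` call is an assumption about the client API, used here only to trigger a 404:

    from meilisearch_python_sdk import Client
    from meilisearch_python_sdk.errors import MeilisearchApiError

    client = Client("http://127.0.0.1:7700", "masterKey")
    try:
        client.get_index("no-such-index")  # hypothetical lookup of a missing index
    except MeilisearchApiError as err:
        # e.g. 404 index_not_found, plus a docs link explaining the code
        print(err.status_code, err.code, err.link)
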
/meilisearch_python_sdk/json_handler.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from abc import ABC, abstractmethod
5 | from typing import Any
6 |
7 | try:
8 | import orjson
9 | except ImportError: # pragma: nocover
10 | orjson = None # type: ignore
11 |
12 | try:
13 | import ujson
14 | except ImportError: # pragma: nocover
15 | ujson = None # type: ignore
16 |
17 |
18 | class _JsonHandler(ABC):
19 | @staticmethod
20 | @abstractmethod
21 | def dumps(obj: Any) -> str: ...
22 |
23 | @staticmethod
24 | @abstractmethod
25 | def loads(json_string: str | bytes | bytearray) -> Any: ...
26 |
27 |
28 | class BuiltinHandler(_JsonHandler):
29 | serializer: type[json.JSONEncoder] | None = None
30 |
31 | def __init__(self, serializer: type[json.JSONEncoder] | None = None) -> None:
32 | """Uses the json module from the Python standard library.
33 |
34 | Args:
35 |             serializer: A custom JSONEncoder to handle serializing fields that the built-in
36 | json.dumps cannot handle, for example UUID and datetime. Defaults to None.
37 | """
38 | BuiltinHandler.serializer = serializer
39 |
40 | @staticmethod
41 | def dumps(obj: Any) -> str:
42 | return json.dumps(obj, cls=BuiltinHandler.serializer)
43 |
44 | @staticmethod
45 | def loads(json_string: str | bytes | bytearray) -> Any:
46 | return json.loads(json_string)
47 |
48 |
49 | class OrjsonHandler(_JsonHandler):
50 | def __init__(self) -> None:
51 | if orjson is None: # pragma: no cover
52 | raise ValueError("orjson must be installed to use the OrjsonHandler")
53 |
54 | @staticmethod
55 | def dumps(obj: Any) -> str:
56 | return orjson.dumps(obj).decode("utf-8")
57 |
58 | @staticmethod
59 | def loads(json_string: str | bytes | bytearray) -> Any:
60 | return orjson.loads(json_string)
61 |
62 |
63 | class UjsonHandler(_JsonHandler):
64 | def __init__(self) -> None:
65 | if ujson is None: # pragma: no cover
66 | raise ValueError("ujson must be installed to use the UjsonHandler")
67 |
68 | @staticmethod
69 | def dumps(obj: Any) -> str:
70 | return ujson.dumps(obj)
71 |
72 | @staticmethod
73 | def loads(json_string: str | bytes | bytearray) -> Any:
74 | return ujson.loads(json_string)
75 |
--------------------------------------------------------------------------------
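The `serializer` hook on `BuiltinHandler` above covers types the standard `json.dumps` rejects, such as the UUID and datetime case its docstring mentions. A minimal sketch wiring a custom encoder into a client:

    import json
    from datetime import datetime
    from typing import Any
    from uuid import UUID

    from meilisearch_python_sdk import Client
    from meilisearch_python_sdk.json_handler import BuiltinHandler

    class CustomEncoder(json.JSONEncoder):
        def default(self, o: Any) -> Any:
            if isinstance(o, (UUID, datetime)):
                return str(o)  # fall back to string form for these types
            return super().default(o)

    client = Client(
        "http://127.0.0.1:7700",
        "masterKey",
        json_handler=BuiltinHandler(serializer=CustomEncoder),
    )
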
/meilisearch_python_sdk/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/meilisearch_python_sdk/models/__init__.py
--------------------------------------------------------------------------------
/meilisearch_python_sdk/models/batch.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from datetime import datetime
4 |
5 | from camel_converter.pydantic_base import CamelBase
6 | from pydantic import Field, field_validator
7 |
8 | from meilisearch_python_sdk._utils import iso_to_date_time
9 | from meilisearch_python_sdk.types import JsonDict
10 |
11 |
12 | class BatchId(CamelBase):
13 | uid: int
14 |
15 |
16 | class Status(CamelBase):
17 | succeeded: int | None = None
18 | failed: int | None = None
19 | cancelled: int | None = None
20 | processing: int | None = None
21 | enqueued: int | None = None
22 |
23 |
24 | class Stats(CamelBase):
25 | total_nb_tasks: int
26 | status: Status
27 | batch_types: JsonDict | None = Field(None, alias="types")
28 | index_uids: JsonDict | None = None
29 | progress_trace: JsonDict | None = None
30 | write_channel_congestion: JsonDict | None = None
31 | internal_database_sizes: JsonDict | None = None
32 |
33 |
34 | class BatchResult(BatchId):
35 | details: JsonDict | None = None
36 | progress: JsonDict | None = None
37 | stats: Stats
38 | duration: str | None = None
39 | started_at: datetime | None = None
40 | finished_at: datetime | None = None
41 |
42 | @field_validator("started_at", mode="before") # type: ignore[attr-defined]
43 | @classmethod
44 | def validate_started_at(cls, v: str) -> datetime | None:
45 | return iso_to_date_time(v)
46 |
47 | @field_validator("finished_at", mode="before") # type: ignore[attr-defined]
48 | @classmethod
49 | def validate_finished_at(cls, v: str) -> datetime | None:
50 | return iso_to_date_time(v)
51 |
52 |
53 | class BatchStatus(CamelBase):
54 | results: list[BatchResult]
55 | total: int
56 | limit: int
57 | from_: int | None = Field(None, alias="from")
58 | next: int | None = None
59 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/models/client.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Mapping
4 | from datetime import datetime
5 |
6 | import pydantic
7 | from camel_converter.pydantic_base import CamelBase
8 |
9 | from meilisearch_python_sdk._utils import iso_to_date_time
10 | from meilisearch_python_sdk.models.index import IndexStats
11 |
12 |
13 | class ClientStats(CamelBase):
14 | database_size: int
15 | used_database_size: int | None = None
16 | last_update: datetime | None = None
17 | indexes: dict[str, IndexStats] | None = None
18 |
19 | @pydantic.field_validator("last_update", mode="before") # type: ignore[attr-defined]
20 | @classmethod
21 | def validate_last_update(cls, v: str) -> datetime | None:
22 | return iso_to_date_time(v)
23 |
24 |
25 | class _KeyBase(CamelBase):
26 | uid: str
27 | name: str | None = None
28 | description: str | None = None
29 | actions: list[str]
30 | indexes: list[str]
31 | expires_at: datetime | None = None
32 |
33 | model_config = pydantic.ConfigDict(ser_json_timedelta="iso8601") # type: ignore[typeddict-unknown-key]
34 |
35 | @pydantic.field_validator("expires_at", mode="before") # type: ignore[attr-defined]
36 | @classmethod
37 | def validate_expires_at(cls, v: str) -> datetime | None:
38 | return iso_to_date_time(v)
39 |
40 |
41 | class Key(_KeyBase):
42 | key: str
43 | created_at: datetime
44 | updated_at: datetime | None = None
45 |
46 | @pydantic.field_validator("created_at", mode="before") # type: ignore[attr-defined]
47 | @classmethod
48 | def validate_created_at(cls, v: str) -> datetime:
49 | converted = iso_to_date_time(v)
50 |
51 | if not converted: # pragma: no cover
52 | raise ValueError("created_at is required")
53 |
54 | return converted
55 |
56 | @pydantic.field_validator("updated_at", mode="before") # type: ignore[attr-defined]
57 | @classmethod
58 | def validate_updated_at(cls, v: str) -> datetime | None:
59 | return iso_to_date_time(v)
60 |
61 |
62 | class KeyCreate(CamelBase):
63 | name: str | None = None
64 | description: str | None = None
65 | actions: list[str]
66 | indexes: list[str]
67 | expires_at: datetime | None = None
68 |
69 | model_config = pydantic.ConfigDict(ser_json_timedelta="iso8601") # type: ignore[typeddict-unknown-key]
70 |
71 |
72 | class KeyUpdate(CamelBase):
73 | key: str
74 | name: str | None = None
75 | description: str | None = None
76 | actions: list[str] | None = None
77 | indexes: list[str] | None = None
78 | expires_at: datetime | None = None
79 |
80 | model_config = pydantic.ConfigDict(ser_json_timedelta="iso8601") # type: ignore[typeddict-unknown-key]
81 |
82 |
83 | class KeySearch(CamelBase):
84 | results: list[Key]
85 | offset: int
86 | limit: int
87 | total: int
88 |
89 |
90 | class Remote(CamelBase):
91 | url: str | None = None
92 | search_api_key: str | None = None
93 |
94 |
95 | class Network(CamelBase):
96 | self_: str | None = pydantic.Field(None, alias="self")
97 | remotes: Mapping[str, Remote] | None = None
98 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/models/documents.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from camel_converter.pydantic_base import CamelBase
4 |
5 | from meilisearch_python_sdk.types import JsonDict
6 |
7 |
8 | class DocumentsInfo(CamelBase):
9 | results: list[JsonDict]
10 | offset: int
11 | limit: int
12 | total: int
13 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/models/health.py:
--------------------------------------------------------------------------------
1 | from camel_converter.pydantic_base import CamelBase
2 |
3 |
4 | class Health(CamelBase):
5 | status: str
6 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/models/index.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from datetime import datetime
4 |
5 | import pydantic
6 | from camel_converter.pydantic_base import CamelBase
7 |
8 | from meilisearch_python_sdk._utils import iso_to_date_time
9 |
10 |
11 | class IndexBase(CamelBase):
12 | uid: str
13 | primary_key: str | None = None
14 |
15 |
16 | class IndexInfo(IndexBase):
17 | created_at: datetime
18 | updated_at: datetime
19 |
20 | @pydantic.field_validator("created_at", mode="before") # type: ignore[attr-defined]
21 | @classmethod
22 | def validate_created_at(cls, v: str) -> datetime:
23 | converted = iso_to_date_time(v)
24 |
25 | if not converted: # pragma: no cover
26 | raise ValueError("created_at is required")
27 |
28 | return converted
29 |
30 | @pydantic.field_validator("updated_at", mode="before") # type: ignore[attr-defined]
31 | @classmethod
32 | def validate_updated_at(cls, v: str) -> datetime:
33 | converted = iso_to_date_time(v)
34 |
35 | if not converted: # pragma: no cover
36 | raise ValueError("updated_at is required")
37 |
38 | return converted
39 |
40 |
41 | class IndexStats(CamelBase):
42 | number_of_documents: int
43 | number_of_embedded_documents: int | None = None
44 | number_of_embeddings: int | None = None
45 | is_indexing: bool
46 | field_distribution: dict[str, int]
47 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/models/search.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Generic, Literal, TypeVar
4 |
5 | from camel_converter.pydantic_base import CamelBase
6 | from pydantic import Field, field_validator
7 |
8 | from meilisearch_python_sdk.errors import MeilisearchError
9 | from meilisearch_python_sdk.types import Filter, JsonDict
10 |
11 | T = TypeVar("T")
12 |
13 |
14 | class FacetHits(CamelBase):
15 | value: str
16 | count: int
17 |
18 |
19 | class FacetSearchResults(CamelBase):
20 | facet_hits: list[FacetHits]
21 | facet_query: str | None
22 | processing_time_ms: int
23 |
24 |
25 | class Hybrid(CamelBase):
26 | semantic_ratio: float
27 | embedder: str
28 |
29 |
30 | class MergeFacets(CamelBase):
31 | max_values_per_facet: int
32 |
33 |
34 | class Federation(CamelBase):
35 | limit: int = 20
36 | offset: int = 0
37 | facets_by_index: dict[str, list[str]] | None = None
38 |
39 |
40 | class FederationMerged(CamelBase):
41 | limit: int = 20
42 | offset: int = 0
43 | facets_by_index: dict[str, list[str]] | None = None
44 | merge_facets: MergeFacets | None
45 |
46 |
47 | class SearchParams(CamelBase):
48 | index_uid: str
49 | query: str | None = Field(None, alias="q")
50 | offset: int = 0
51 | limit: int = 20
52 | filter: Filter | None = None
53 | facets: list[str] | None = None
54 | attributes_to_retrieve: list[str] = ["*"]
55 | attributes_to_crop: list[str] | None = None
56 | crop_length: int = 200
57 | attributes_to_highlight: list[str] | None = None
58 | sort: list[str] | None = None
59 | show_matches_position: bool = False
60 |     highlight_pre_tag: str = "<em>"
61 |     highlight_post_tag: str = "</em>"
62 | crop_marker: str = "..."
63 | matching_strategy: Literal["all", "last", "frequency"] = "last"
64 | hits_per_page: int | None = None
65 | page: int | None = None
66 | attributes_to_search_on: list[str] | None = None
67 | show_ranking_score: bool = False
68 | show_ranking_score_details: bool = False
69 | ranking_score_threshold: float | None = None
70 | vector: list[float] | None = None
71 | hybrid: Hybrid | None = None
72 | locales: list[str] | None = None
73 | retrieve_vectors: bool | None = None
74 |
75 | @field_validator("ranking_score_threshold", mode="before") # type: ignore[attr-defined]
76 | @classmethod
77 | def validate_ranking_score_threshold(cls, v: float | None) -> float | None:
78 | if v and not 0.0 <= v <= 1.0:
79 | raise MeilisearchError("ranking_score_threshold must be between 0.0 and 1.0")
80 |
81 | return v
82 |
83 |
84 | class SearchResults(CamelBase, Generic[T]):
85 | hits: list[T]
86 | offset: int | None = None
87 | limit: int | None = None
88 | estimated_total_hits: int | None = None
89 | processing_time_ms: int
90 | query: str
91 | facet_distribution: JsonDict | None = None
92 | total_pages: int | None = None
93 | total_hits: int | None = None
94 | page: int | None = None
95 | hits_per_page: int | None = None
96 | semantic_hit_count: int | None = None
97 |
98 |
99 | class SearchResultsWithUID(SearchResults, Generic[T]):
100 | index_uid: str
101 |
102 |
103 | class SearchResultsFederated(CamelBase, Generic[T]):
104 | hits: list[T]
105 | offset: int | None = None
106 | limit: int | None = None
107 | estimated_total_hits: int | None = None
108 | processing_time_ms: int
109 | facet_distribution: JsonDict | None = None
110 | total_pages: int | None = None
111 | total_hits: int | None = None
112 | page: int | None = None
113 | hits_per_page: int | None = None
114 | semantic_hit_count: int | None = None
115 | facets_by_index: JsonDict | None = None
116 |
117 |
118 | class SimilarSearchResults(CamelBase, Generic[T]):
119 | hits: list[T]
120 | id: str
121 | processing_time_ms: int
122 | limit: int | None = None
123 | offset: int | None = None
124 | estimated_total_hits: int | None = None
125 |
--------------------------------------------------------------------------------
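A minimal usage sketch (not part of the repository; the index uid and query are made up): SearchParams serializes to the camelCase body Meilisearch expects, and the validator rejects an out-of-range ranking score threshold.

from meilisearch_python_sdk.models.search import SearchParams

params = SearchParams(index_uid="movies", q="wonder", ranking_score_threshold=0.8)
print(params.model_dump(by_alias=True)["indexUid"])  # movies

try:
    SearchParams(index_uid="movies", ranking_score_threshold=1.5)
except Exception as e:
    print(e)  # ranking_score_threshold must be between 0.0 and 1.0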
/meilisearch_python_sdk/models/settings.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from enum import Enum
4 | from typing import Literal
5 |
6 | import pydantic
7 | from camel_converter.pydantic_base import CamelBase
8 |
9 | from meilisearch_python_sdk.types import JsonDict
10 |
11 |
12 | class MinWordSizeForTypos(CamelBase):
13 | one_typo: int | None = None
14 | two_typos: int | None = None
15 |
16 |
17 | class TypoTolerance(CamelBase):
18 | enabled: bool = True
19 | disable_on_attributes: list[str] | None = None
20 | disable_on_words: list[str] | None = None
21 | min_word_size_for_typos: MinWordSizeForTypos | None = None
22 |
23 |
24 | class Faceting(CamelBase):
25 | max_values_per_facet: int
26 | sort_facet_values_by: dict[str, str] | None = None
27 |
28 | @pydantic.field_validator("sort_facet_values_by") # type: ignore[attr-defined]
29 | @classmethod
30 | def validate_facet_order(cls, v: dict[str, str] | None) -> dict[str, str] | None:
31 | if not v: # pragma: no cover
32 | return None
33 |
34 | for _, value in v.items():
35 | if value not in ("alpha", "count"):
36 | raise ValueError('facet_order must be either "alpha" or "count"')
37 |
38 | return v
39 |
40 |
41 | class Pagination(CamelBase):
42 | max_total_hits: int
43 |
44 |
45 | class Distribution(CamelBase):
46 | mean: float
47 | sigma: float
48 |
49 |
50 | class OpenAiEmbedder(CamelBase):
51 | source: str = "openAi"
52 | url: str | None = None
53 | model: str | None = None
54 | dimensions: int | None = None
55 | api_key: str | None = None
56 | document_template: str | None = None
57 | document_template_max_bytes: int | None = None
58 | distribution: Distribution | None = None
59 | binary_quantized: bool | None = None
60 |
61 |
62 | class HuggingFaceEmbedder(CamelBase):
63 | source: str = "huggingFace"
64 | model: str | None = None
65 | revision: str | None = None
66 | document_template: str | None = None
67 | document_template_max_bytes: int | None = None
68 | distribution: Distribution | None = None
69 | dimensions: int | None = None
70 | binary_quantized: bool | None = None
71 | pooling: Literal["useModel", "forceMean", "forceCls"] | None = None
72 |
73 |
74 | class OllamaEmbedder(CamelBase):
75 | source: str = "ollama"
76 | url: str | None = None
77 | api_key: str | None = None
78 | model: str
79 | dimensions: int | None = None
80 | document_template: str | None = None
81 | document_template_max_bytes: int | None = None
82 | distribution: Distribution | None = None
83 | binary_quantized: bool | None = None
84 |
85 |
86 | class RestEmbedder(CamelBase):
87 | source: str = "rest"
88 | url: str
89 | api_key: str | None = None
90 | dimensions: int
91 | document_template: str | None = None
92 | document_template_max_bytes: int | None = None
93 | distribution: Distribution | None = None
94 | headers: JsonDict | None = None
95 | request: JsonDict
96 | response: JsonDict
97 | binary_quantized: bool | None = None
98 |
99 |
100 | class UserProvidedEmbedder(CamelBase):
101 | source: str = "userProvided"
102 | dimensions: int
103 | distribution: Distribution | None = None
104 | document_template: str | None = None
105 | document_template_max_bytes: int | None = None
106 | binary_quantized: bool | None = None
107 |
108 |
109 | class CompositeEmbedder(CamelBase):
110 | source: str = "composite"
111 | search_embedder: (
112 | OpenAiEmbedder | HuggingFaceEmbedder | OllamaEmbedder | RestEmbedder | UserProvidedEmbedder
113 | )
114 | indexing_embedder: (
115 | OpenAiEmbedder | HuggingFaceEmbedder | OllamaEmbedder | RestEmbedder | UserProvidedEmbedder
116 | )
117 |
118 |
119 | class Embedders(CamelBase):
120 | embedders: dict[
121 | str,
122 | OpenAiEmbedder
123 | | HuggingFaceEmbedder
124 | | OllamaEmbedder
125 | | RestEmbedder
126 | | UserProvidedEmbedder
127 | | CompositeEmbedder,
128 | ]
129 |
130 |
131 | class ProximityPrecision(str, Enum):
132 | BY_WORD = "byWord"
133 | BY_ATTRIBUTE = "byAttribute"
134 |
135 |
136 | class LocalizedAttributes(CamelBase):
137 | locales: list[str]
138 | attribute_patterns: list[str]
139 |
140 |
141 | class Filter(CamelBase):
142 | equality: bool
143 | comparison: bool
144 |
145 |
146 | class FilterableAttributeFeatures(CamelBase):
147 | facet_search: bool
148 | filter: Filter
149 |
150 |
151 | class FilterableAttributes(CamelBase):
152 | attribute_patterns: list[str]
153 | features: FilterableAttributeFeatures
154 |
155 |
156 | class MeilisearchSettings(CamelBase):
157 | synonyms: JsonDict | None = None
158 | stop_words: list[str] | None = None
159 | ranking_rules: list[str] | None = None
160 | filterable_attributes: list[str] | list[FilterableAttributes] | None = None
161 | distinct_attribute: str | None = None
162 | searchable_attributes: list[str] | None = None
163 | displayed_attributes: list[str] | None = None
164 | sortable_attributes: list[str] | None = None
165 | typo_tolerance: TypoTolerance | None = None
166 | faceting: Faceting | None = None
167 | pagination: Pagination | None = None
168 | proximity_precision: ProximityPrecision | None = None
169 | separator_tokens: list[str] | None = None
170 | non_separator_tokens: list[str] | None = None
171 | search_cutoff_ms: int | None = None
172 | dictionary: list[str] | None = None
173 | embedders: (
174 | dict[
175 | str,
176 | OpenAiEmbedder
177 | | HuggingFaceEmbedder
178 | | OllamaEmbedder
179 | | RestEmbedder
180 | | UserProvidedEmbedder
181 | | CompositeEmbedder,
182 | ]
183 | | None
184 | ) = None # Optional[Embedders] = None
185 | localized_attributes: list[LocalizedAttributes] | None = None
186 | facet_search: bool | None = None
187 | prefix_search: Literal["disabled", "indexingTime", "searchTime"] | None = None
188 |
--------------------------------------------------------------------------------
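A minimal usage sketch (not part of the repository), mirroring the test fixtures later in this listing: MeilisearchSettings composed with facet ordering and a user-provided embedder; the facet-order validator only accepts "alpha" or "count".

from meilisearch_python_sdk.models.settings import (
    Faceting,
    MeilisearchSettings,
    UserProvidedEmbedder,
)

settings = MeilisearchSettings(
    ranking_rules=["typo", "words"],
    faceting=Faceting(max_values_per_facet=100, sort_facet_values_by={"*": "count"}),
    embedders={"default": UserProvidedEmbedder(dimensions=2)},
)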
/meilisearch_python_sdk/models/task.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from datetime import datetime
4 |
5 | import pydantic
6 | from camel_converter.pydantic_base import CamelBase
7 | from pydantic import Field
8 |
9 | from meilisearch_python_sdk._utils import iso_to_date_time
10 | from meilisearch_python_sdk.types import JsonDict
11 |
12 |
13 | class TaskId(CamelBase):
14 | uid: int
15 |
16 |
17 | class TaskResult(TaskId):
18 | index_uid: str | None = None
19 | status: str
20 | task_type: str | JsonDict = Field(..., alias="type")
21 | details: JsonDict | None = None
22 | error: JsonDict | None = None
23 | canceled_by: int | None = None
24 | duration: str | None = None
25 | enqueued_at: datetime
26 | started_at: datetime | None = None
27 | finished_at: datetime | None = None
28 | batch_uid: int | None = None
29 |
30 | @pydantic.field_validator("enqueued_at", mode="before") # type: ignore[attr-defined]
31 | @classmethod
32 | def validate_enqueued_at(cls, v: str) -> datetime:
33 | converted = iso_to_date_time(v)
34 |
35 | if not converted: # pragma: no cover
36 | raise ValueError("enqueued_at is required")
37 |
38 | return converted
39 |
40 | @pydantic.field_validator("started_at", mode="before") # type: ignore[attr-defined]
41 | @classmethod
42 | def validate_started_at(cls, v: str) -> datetime | None:
43 | return iso_to_date_time(v)
44 |
45 | @pydantic.field_validator("finished_at", mode="before") # type: ignore[attr-defined]
46 | @classmethod
47 | def validate_finished_at(cls, v: str) -> datetime | None:
48 | return iso_to_date_time(v)
49 |
50 |
51 | class TaskStatus(CamelBase):
52 | results: list[TaskResult]
53 | total: int
54 | limit: int
55 | from_: int = Field(..., alias="from")
56 | next: int | None = None
57 |
58 |
59 | class TaskInfo(CamelBase):
60 | task_uid: int
61 | index_uid: str | None = None
62 | status: str
63 | task_type: str | JsonDict = Field(..., alias="type")
64 | enqueued_at: datetime
65 | batch_uid: int | None = None
66 |
67 | @pydantic.field_validator("enqueued_at", mode="before") # type: ignore[attr-defined]
68 | @classmethod
69 | def validate_enqueued_at(cls, v: str) -> datetime:
70 | converted = iso_to_date_time(v)
71 |
72 | if not converted: # pragma: no cover
73 | raise ValueError("enqueued_at is required")
74 |
75 | return converted
76 |
--------------------------------------------------------------------------------
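A minimal usage sketch (not part of the repository; all values are made up): TaskInfo validates a raw task payload, converting the enqueuedAt string to a datetime and mapping the reserved "type" key onto task_type.

from meilisearch_python_sdk.models.task import TaskInfo

task = TaskInfo.model_validate(
    {
        "taskUid": 1,
        "indexUid": "movies",
        "status": "enqueued",
        "type": "documentAdditionOrUpdate",
        "enqueuedAt": "2021-05-11T03:12:22.563960100Z",
    }
)
print(task.task_type)  # documentAdditionOrUpdate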
/meilisearch_python_sdk/models/version.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import Union
3 |
4 | from camel_converter.pydantic_base import CamelBase
5 |
6 |
7 | class Version(CamelBase):
8 | commit_sha: str
9 | commit_date: Union[datetime, str]
10 | pkg_version: str
11 |
--------------------------------------------------------------------------------
/meilisearch_python_sdk/plugins.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Sequence
4 | from enum import Enum
5 | from typing import TYPE_CHECKING, Any, NamedTuple, Protocol
6 |
7 | if TYPE_CHECKING: # pragma: no cover
8 | from meilisearch_python_sdk.models.search import FacetSearchResults, SearchResults
9 | from meilisearch_python_sdk.models.task import TaskInfo
10 | from meilisearch_python_sdk.types import JsonDict, JsonMapping
11 |
12 |
13 | class AsyncEvent(Enum):
14 | PRE = "pre"
15 | CONCURRENT = "concurrent"
16 | POST = "post"
17 |
18 |
19 | class Event(Enum):
20 | PRE = "pre"
21 | POST = "post"
22 |
23 |
24 | class AsyncPlugin(Protocol):
25 | CONCURRENT_EVENT: bool
26 | POST_EVENT: bool
27 | PRE_EVENT: bool
28 |
29 | async def run_plugin(
30 | self, event: AsyncEvent, **kwargs: Any
31 | ) -> (
32 | None | list[JsonDict] | TaskInfo | list[TaskInfo] | SearchResults | FacetSearchResults
33 | ): # pragma: no cover
34 | ...
35 |
36 |
37 | class AsyncDocumentPlugin(Protocol):
38 | CONCURRENT_EVENT: bool
39 | POST_EVENT: bool
40 | PRE_EVENT: bool
41 |
42 | async def run_document_plugin(
43 | self,
44 | event: AsyncEvent,
45 | *,
46 | documents: Sequence[JsonMapping],
47 | primary_key: str | None,
48 | **kwargs: Any,
49 | ) -> Sequence[JsonMapping] | None: # pragma: no cover
50 | ...
51 |
52 |
53 | class AsyncPostSearchPlugin(Protocol):
54 | CONCURRENT_EVENT: bool
55 | POST_EVENT: bool
56 | PRE_EVENT: bool
57 |
58 | async def run_post_search_plugin(
59 | self,
60 | event: AsyncEvent,
61 | *,
62 | search_results: SearchResults,
63 | **kwargs: Any,
64 | ) -> SearchResults | None: # pragma: no cover
65 | ...
66 |
67 |
68 | class Plugin(Protocol):
69 | POST_EVENT: bool
70 | PRE_EVENT: bool
71 |
72 | def run_plugin(
73 | self, event: Event, **kwargs: Any
74 | ) -> (
75 | None | list[JsonDict] | TaskInfo | list[TaskInfo] | SearchResults | FacetSearchResults
76 | ): # pragma: no cover
77 | ...
78 |
79 |
80 | class DocumentPlugin(Protocol):
81 | POST_EVENT: bool
82 | PRE_EVENT: bool
83 |
84 | def run_document_plugin(
85 | self,
86 | event: Event,
87 | *,
88 | documents: Sequence[JsonMapping],
89 | primary_key: str | None,
90 | **kwargs: Any,
91 | ) -> Sequence[JsonMapping] | None: # pragma: no cover
92 | ...
93 |
94 |
95 | class PostSearchPlugin(Protocol):
96 | POST_EVENT: bool
97 | PRE_EVENT: bool
98 |
99 | def run_post_search_plugin(
100 | self, event: Event, *, search_results: SearchResults, **kwargs: Any
101 | ) -> SearchResults | None: # pragma: no cover
102 | ...
103 |
104 |
105 | class AsyncIndexPlugins(NamedTuple):
106 | add_documents_plugins: Sequence[AsyncPlugin | AsyncDocumentPlugin] | None = None
107 | delete_all_documents_plugins: Sequence[AsyncPlugin] | None = None
108 | delete_document_plugins: Sequence[AsyncPlugin] | None = None
109 | delete_documents_plugins: Sequence[AsyncPlugin] | None = None
110 | delete_documents_by_filter_plugins: Sequence[AsyncPlugin] | None = None
111 | facet_search_plugins: Sequence[AsyncPlugin] | None = None
112 | search_plugins: Sequence[AsyncPlugin | AsyncPostSearchPlugin] | None = None
113 | update_documents_plugins: Sequence[AsyncPlugin | AsyncDocumentPlugin] | None = None
114 |
115 |
116 | class IndexPlugins(NamedTuple):
117 | add_documents_plugins: Sequence[Plugin | DocumentPlugin] | None = None
118 | delete_all_documents_plugins: Sequence[Plugin] | None = None
119 | delete_document_plugins: Sequence[Plugin] | None = None
120 | delete_documents_plugins: Sequence[Plugin] | None = None
121 | delete_documents_by_filter_plugins: Sequence[Plugin] | None = None
122 | facet_search_plugins: Sequence[Plugin] | None = None
123 | search_plugins: Sequence[Plugin | PostSearchPlugin] | None = None
124 | update_documents_plugins: Sequence[Plugin | DocumentPlugin] | None = None
125 |
--------------------------------------------------------------------------------
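A minimal sketch (not part of the repository) of a class satisfying the DocumentPlugin protocol; because the plugins are typing Protocols, no base class is needed. The "processed" field is made up.

from __future__ import annotations

from typing import Any

from meilisearch_python_sdk.plugins import Event, IndexPlugins


class StampDocumentsPlugin:
    POST_EVENT = False
    PRE_EVENT = True  # run before the documents are sent to the index

    def run_document_plugin(self, event: Event, *, documents, primary_key, **kwargs: Any):
        # The protocol allows returning a modified document sequence or None.
        return [{**document, "processed": True} for document in documents]


plugins = IndexPlugins(add_documents_plugins=(StampDocumentsPlugin(),))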
/meilisearch_python_sdk/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/meilisearch_python_sdk/py.typed
--------------------------------------------------------------------------------
/meilisearch_python_sdk/types.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import MutableMapping
4 | from typing import TYPE_CHECKING, Any, Union
5 |
6 | if TYPE_CHECKING: # pragma: no cover
7 | import sys
8 |
9 | if sys.version_info >= (3, 10):
10 | from typing import TypeAlias
11 | else:
12 | from typing_extensions import TypeAlias
13 |
14 | Filter: TypeAlias = Union[str, list[Union[str, list[str]]]]
15 | JsonDict: TypeAlias = dict[str, Any]
16 | JsonMapping: TypeAlias = MutableMapping[str, Any]
17 |
--------------------------------------------------------------------------------
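Illustrative values (not part of the repository; the filter expressions are made up) matching the aliases above:

filter_a = "genre = horror"  # Filter as a single expression string
filter_b = ["release_date > 100", ["genre = horror", "genre = comedy"]]  # Filter as a nested list
document = {"id": 1, "title": "Title 1"}  # a JsonDict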
/mkdocs.yaml:
--------------------------------------------------------------------------------
1 | site_name: Meilisearch Python SDK
2 | site_description: A Python client providing both async and sync support for the Meilisearch API
3 | site_url: https://meilisearch-python-sdk.paulsanders.dev
4 |
5 | theme:
6 | name: material
7 | locale: en
8 | icon:
9 | repo: fontawesome/brands/github
10 | palette:
11 | - scheme: slate
12 | primary: green
13 | accent: blue
14 | toggle:
15 | icon: material/lightbulb-outline
16 |         name: Switch to light mode
17 | - scheme: default
18 | primary: green
19 | accent: blue
20 | toggle:
21 | icon: material/lightbulb
22 |         name: Switch to dark mode
23 | features:
24 | - search.suggest
25 | - search.highlight
26 | repo_name: sanders41/meilisearch-python-sdk
27 | repo_url: https://github.com/sanders41/meilisearch-python-sdk
28 |
29 | nav:
30 | - Home: index.md
31 | - API:
32 | - AsyncClient: async_client_api.md
33 | - Client: client_api.md
34 | - AsyncIndex: async_index_api.md
35 | - Index: index_api.md
36 | - Decorators: decorators_api.md
37 | - Plugins: plugins.md
38 | - JSON Handler: json_handler.md
39 | - Pydantic: pydantic.md
40 |
41 | plugins:
42 | - mkdocstrings
43 | - search
44 |
45 | extra_javascript:
46 | - "js/umami.js"
47 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "meilisearch-python-sdk"
7 | description = "A Python client providing both async and sync support for the Meilisearch API"
8 | authors = [ { name = "Paul Sanders", email = "paul@paulsanders.dev" } ]
9 | requires-python = ">=3.9"
10 | license = { file = "LICENSE" }
11 | readme = "README.md"
12 | keywords = ["meilisearch", "async", "python", "client", "sdk"]
13 | classifiers=[
14 | "Development Status :: 5 - Production/Stable",
15 | "Intended Audience :: Developers",
16 | "Programming Language :: Python :: 3.9",
17 | "Programming Language :: Python :: 3.10",
18 | "Programming Language :: Python :: 3.11",
19 | "Programming Language :: Python :: 3.12",
20 | "Programming Language :: Python :: 3.13",
21 | "License :: OSI Approved :: MIT License",
22 | "Operating System :: OS Independent",
23 | "Typing :: Typed",
24 | ]
25 | dynamic = ["version"]
26 | dependencies = [
27 | "aiofiles>=0.7",
28 | "camel-converter[pydantic]>=1.0.0",
29 | # allows pydantic to use pipe instead of Union
30 | "eval-type-backport>=0.2.0; python_version < '3.10'",
31 | "httpx[http2]>=0.17",
32 | "pydantic>=2.0.0",
33 | "PyJWT>=2.3.0",
34 | ]
35 |
36 | [project.optional-dependencies]
37 | orjson = ["orjson>=3.10.6"]
38 | ujson = ["ujson>=5.10.0"]
39 | all = ["orjson", "ujson"]
40 |
41 | [dependency-groups]
42 | dev = [
43 | "mkdocs==1.6.1",
44 | "mkdocs-material==9.6.14",
45 | "mkdocstrings[python]==0.29.1",
46 | "mypy[faster-cache]==1.16.0",
47 | "pre-commit==4.2.0",
48 | "pytest==8.4.0",
49 | "pytest-cov==6.1.1",
50 | "pytest-asyncio==1.0.0",
51 | "pytest-xdist==3.7.0",
52 | "ruff==0.11.13",
53 | "types-aiofiles==24.1.0.20250606",
54 | "typing-extensions==4.14.0",
55 | "types-ujson==5.10.0.20250326",
56 | "meilisearch==0.34.1",
57 | "rich==14.0.0",
58 | ]
59 |
60 | [tool.hatch.version]
61 | path = "meilisearch_python_sdk/_version.py"
62 |
63 | [project.urls]
64 | repository = "https://github.com/sanders41/meilisearch-python-sdk"
65 | homepage = "https://github.com/sanders41/meilisearch-python-sdk"
66 | documentation = "https://meilisearch-python-sdk.paulsanders.dev"
67 |
68 | [tool.pytest.ini_options]
69 | minversion = "6.0"
70 | addopts = "--cov=meilisearch_python_sdk --cov-report term-missing --no-cov-on-fail --ignore examples"
71 | asyncio_mode = "auto"
72 | asyncio_default_fixture_loop_scope = "session"
73 | asyncio_default_test_loop_scope = "session"
74 | markers = ["no_parallel"]
75 |
76 | [tool.mypy]
77 | check_untyped_defs = true
78 | disallow_untyped_defs = true
79 | plugins = ["pydantic.mypy"]
80 |
81 | [[tool.mypy.overrides]]
82 | module = ["tests.*", "examples/tests.*"]
83 | disallow_untyped_defs = false
84 |
85 | [[tool.mypy.overrides]]
86 | module = ["aiocache.*", "truststore.*"]
87 | ignore_missing_imports = true
88 |
89 | [tool.ruff]
90 | line-length = 100
91 | target-version = "py39"
92 | fix = true
93 |
94 | [tool.ruff.lint]
95 | select=[
96 | "E", # pycodestyle
97 | "B", # flake8-bugbear
98 | "W", # Warning
99 | "F", # pyflakes
100 | "UP", # pyupgrade
101 | "I001", # unsorted-imports
102 | "T201", # Don't allow print
103 | "T203", # Don't allow pprint
104 | "ASYNC", # flake8-async
105 | "RUF022", # Unsorted __all__
106 |   "RUF023", # Unsorted __slots__
107 | ]
108 | ignore=[
109 |   # Recommended ignores from ruff when using the formatter
110 | "E501",
111 | "W191",
112 | "E111",
113 | "E114",
114 | "E117",
115 | "D206",
116 | "D300",
117 | "Q000",
118 | "Q001",
119 | "Q002",
120 | "Q003",
121 | "COM812",
122 | "COM819",
123 | "ISC001",
124 | "ISC002",
125 | ]
126 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sanders41/meilisearch-python-sdk/0db7c9b4c4c366e7a91072bcad6826abf42ce861/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import csv
3 | import json
4 | import ssl
5 | import sys
6 | from logging import warning
7 | from pathlib import Path
8 | from uuid import uuid4
9 |
10 | import pytest
11 |
12 | try:
13 | import truststore as truststore
14 | except ImportError:
15 | truststore = None
16 |
17 | from httpx import AsyncClient as HttpxAsyncClient
18 |
19 | from meilisearch_python_sdk import AsyncClient, Client
20 | from meilisearch_python_sdk._task import async_wait_for_task, wait_for_task
21 | from meilisearch_python_sdk.json_handler import OrjsonHandler, UjsonHandler
22 | from meilisearch_python_sdk.models.settings import (
23 | Embedders,
24 | Faceting,
25 | LocalizedAttributes,
26 | MeilisearchSettings,
27 | Pagination,
28 | ProximityPrecision,
29 | TypoTolerance,
30 | UserProvidedEmbedder,
31 | )
32 |
33 | MASTER_KEY = "masterKey"
34 |
35 | ROOT_PATH = Path().absolute()
36 | SMALL_MOVIES_PATH = ROOT_PATH / "datasets" / "small_movies.json"
37 |
38 |
39 | def pytest_addoption(parser):
40 | parser.addoption("--http2", action="store_true")
41 |
42 |
43 | @pytest.fixture(scope="session")
44 | def http2_enabled(request):
45 | return request.config.getoption("--http2")
46 |
47 |
48 | @pytest.fixture(scope="session")
49 | def ssl_verify(http2_enabled):
50 | if truststore: # truststore is installed
51 | if http2_enabled:
52 |             # http2 requires ssl, so use truststore to make things work with mkcert
53 |             return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
54 | return True # recommended default
55 | else: # truststore isn't installed
56 | if sys.version_info >= (3, 10): # should be available in 3.10+
57 |             warning("truststore is not installed; your environment may be broken, run uv sync")
58 | # without truststore we can't verify the ssl (and when http2 is enabled, verification must be disabled)
59 | return not http2_enabled
60 |
61 |
62 | @pytest.fixture(scope="session")
63 | async def async_client(base_url, ssl_verify):
64 | async with AsyncClient(base_url, MASTER_KEY, verify=ssl_verify) as client:
65 | yield client
66 |
67 |
68 | @pytest.fixture(scope="session")
69 | async def async_client_orjson_handler(base_url, ssl_verify):
70 | async with AsyncClient(
71 | base_url, MASTER_KEY, json_handler=OrjsonHandler(), verify=ssl_verify
72 | ) as client:
73 | yield client
74 |
75 |
76 | @pytest.fixture(scope="session")
77 | async def async_client_ujson_handler(base_url, ssl_verify):
78 | async with AsyncClient(
79 | base_url, MASTER_KEY, json_handler=UjsonHandler(), verify=ssl_verify
80 | ) as client:
81 | yield client
82 |
83 |
84 | @pytest.fixture(scope="session")
85 | async def async_client_with_plugins(base_url, ssl_verify):
86 | async with AsyncClient(base_url, MASTER_KEY, verify=ssl_verify) as client:
87 | yield client
88 |
89 |
90 | @pytest.fixture(scope="session")
91 | def client(base_url, ssl_verify):
92 | yield Client(base_url, MASTER_KEY, verify=ssl_verify)
93 |
94 |
95 | @pytest.fixture(scope="session")
96 | def client_orjson_handler(base_url, ssl_verify):
97 | yield Client(base_url, MASTER_KEY, json_handler=OrjsonHandler(), verify=ssl_verify)
98 |
99 |
100 | @pytest.fixture(scope="session")
101 | def client_ujson_handler(base_url, ssl_verify):
102 | yield Client(base_url, MASTER_KEY, json_handler=UjsonHandler(), verify=ssl_verify)
103 |
104 |
105 | @pytest.fixture(autouse=True)
106 | async def clear_indexes(async_client, pytestconfig):
107 |     """Auto-clears the indexes before and after each test function unless running the parallel test subset."""
108 |     if pytestconfig.getoption("-m") != "not no_parallel":
109 | indexes = await async_client.get_indexes()
110 | if indexes:
111 | tasks = await asyncio.gather(*[async_client.index(x.uid).delete() for x in indexes])
112 | await asyncio.gather(*[async_client.wait_for_task(x.task_uid) for x in tasks])
113 | yield
114 |     if pytestconfig.getoption("-m") != "not no_parallel":
115 | indexes = await async_client.get_indexes()
116 | if indexes:
117 | tasks = await asyncio.gather(*[async_client.index(x.uid).delete() for x in indexes])
118 | await asyncio.gather(*[async_client.wait_for_task(x.task_uid) for x in tasks])
119 |
120 |
121 | @pytest.fixture(scope="session")
122 | def master_key():
123 | return MASTER_KEY
124 |
125 |
126 | @pytest.fixture(scope="session")
127 | def base_url(http2_enabled):
128 | schema = "https" if http2_enabled else "http"
129 | return f"{schema}://127.0.0.1:7700"
130 |
131 |
132 | @pytest.fixture
133 | async def async_indexes_sample(async_client):
134 | index_info = [
135 | {"uid": str(uuid4())},
136 | {"uid": str(uuid4()), "primary_key": "book_id"},
137 | ]
138 | indexes = []
139 | for index_args in index_info:
140 | index = await async_client.create_index(**index_args)
141 | indexes.append(index)
142 | yield indexes, index_info[0]["uid"], index_info[1]["uid"]
143 |
144 |
145 | @pytest.fixture
146 | def indexes_sample(client):
147 | index_info = [
148 | {"uid": str(uuid4())},
149 | {"uid": str(uuid4()), "primary_key": "book_id"},
150 | ]
151 | indexes = []
152 | for index_args in index_info:
153 | index = client.create_index(**index_args)
154 | indexes.append(index)
155 | yield indexes, index_info[0]["uid"], index_info[1]["uid"]
156 |
157 |
158 | @pytest.fixture
159 | def small_movies():
160 | with open(SMALL_MOVIES_PATH) as movie_file:
161 | yield json.loads(movie_file.read())
162 |
163 |
164 | @pytest.fixture
165 | def small_movies_csv_path(small_movies, tmp_path):
166 | file_path = tmp_path / "small_movies.csv"
167 | with open(file_path, "w") as f:
168 | field_names = list(small_movies[0].keys())
169 | writer = csv.DictWriter(f, fieldnames=field_names, quoting=csv.QUOTE_MINIMAL)
170 | writer.writeheader()
171 | writer.writerows(small_movies)
172 |
173 | return file_path
174 |
175 |
176 | @pytest.fixture
177 | def small_movies_csv_path_semicolon_delimiter(small_movies, tmp_path):
178 | file_path = tmp_path / "small_movies.csv"
179 | with open(file_path, "w") as f:
180 | field_names = list(small_movies[0].keys())
181 | writer = csv.DictWriter(f, fieldnames=field_names, quoting=csv.QUOTE_MINIMAL, delimiter=";")
182 | writer.writeheader()
183 | writer.writerows(small_movies)
184 |
185 | return file_path
186 |
187 |
188 | @pytest.fixture
189 | def small_movies_ndjson_path(small_movies, tmp_path):
190 | file_path = tmp_path / "small_movies.ndjson"
191 | nd_json = [json.dumps(x) for x in small_movies]
192 | with open(file_path, "w") as f:
193 | for line in nd_json:
194 | f.write(f"{line}\n")
195 |
196 | return file_path
197 |
198 |
199 | @pytest.fixture(scope="session")
200 | def small_movies_path():
201 | return SMALL_MOVIES_PATH
202 |
203 |
204 | @pytest.fixture
205 | async def async_empty_index(async_client):
206 | async def index_maker():
207 | return await async_client.create_index(uid=str(uuid4()), timeout_in_ms=5000)
208 |
209 | return index_maker
210 |
211 |
212 | @pytest.fixture
213 | def empty_index(client):
214 | def index_maker():
215 | return client.create_index(uid=str(uuid4()), timeout_in_ms=5000)
216 |
217 | return index_maker
218 |
219 |
220 | @pytest.fixture
221 | async def async_index_with_documents(async_empty_index, small_movies):
222 | async def index_maker(documents=small_movies):
223 | index = await async_empty_index()
224 | response = await index.add_documents(documents)
225 | await async_wait_for_task(index.http_client, response.task_uid)
226 | return index
227 |
228 | return index_maker
229 |
230 |
231 | @pytest.fixture
232 | def index_with_documents(empty_index, small_movies):
233 | def index_maker(documents=small_movies):
234 | index = empty_index()
235 | response = index.add_documents(documents)
236 | wait_for_task(index.http_client, response.task_uid)
237 | return index
238 |
239 | return index_maker
240 |
241 |
242 | @pytest.fixture
243 | async def async_index_with_documents_and_vectors(async_empty_index, small_movies):
244 | small_movies[0]["_vectors"] = {"default": [0.1, 0.2]}
245 | for movie in small_movies[1:]:
246 | movie["_vectors"] = {"default": [0.9, 0.9]}
247 |
248 | async def index_maker(documents=small_movies):
249 | index = await async_empty_index()
250 | task = await index.update_embedders(
251 | Embedders(embedders={"default": UserProvidedEmbedder(dimensions=2)})
252 | )
253 | await async_wait_for_task(index.http_client, task.task_uid)
254 |
255 | response = await index.add_documents(documents)
256 | await async_wait_for_task(index.http_client, response.task_uid)
257 | return index
258 |
259 | return index_maker
260 |
261 |
262 | @pytest.fixture
263 | def index_with_documents_and_vectors(empty_index, small_movies):
264 | small_movies[0]["_vectors"] = {"default": [0.1, 0.2]}
265 | for movie in small_movies[1:]:
266 | movie["_vectors"] = {"default": [0.9, 0.9]}
267 |
268 | def index_maker(documents=small_movies):
269 | index = empty_index()
270 | task = index.update_embedders(
271 | Embedders(embedders={"default": UserProvidedEmbedder(dimensions=2)})
272 | )
273 | wait_for_task(index.http_client, task.task_uid)
274 | response = index.add_documents(documents)
275 | wait_for_task(index.http_client, response.task_uid)
276 | return index
277 |
278 | return index_maker
279 |
280 |
281 | @pytest.fixture
282 | async def default_search_key(async_client):
283 | keys = await async_client.get_keys()
284 |
285 | for key in keys.results:
286 | if key.actions == ["search"]:
287 | return key
288 |
289 |
290 | @pytest.fixture(scope="session", autouse=True)
291 | async def enable_edit_by_function(base_url, ssl_verify):
292 | async with HttpxAsyncClient(
293 | base_url=base_url, headers={"Authorization": f"Bearer {MASTER_KEY}"}, verify=ssl_verify
294 | ) as client:
295 | await client.patch("/experimental-features", json={"editDocumentsByFunction": True})
296 | yield
297 |
298 |
299 | @pytest.fixture(scope="session", autouse=True)
300 | async def enable_network(base_url, ssl_verify):
301 | async with HttpxAsyncClient(
302 | base_url=base_url, headers={"Authorization": f"Bearer {MASTER_KEY}"}, verify=ssl_verify
303 | ) as client:
304 | await client.patch("/experimental-features", json={"network": True})
305 | yield
306 |
307 |
308 | @pytest.fixture
309 | async def create_tasks(async_empty_index, small_movies):
310 | """Ensures there are some tasks present for testing."""
311 | index = await async_empty_index()
312 | await index.update_ranking_rules(["typo", "exactness"])
313 | await index.reset_ranking_rules()
314 | await index.add_documents(small_movies)
315 | await index.add_documents(small_movies)
316 |
317 |
318 | @pytest.fixture
319 | def new_settings():
320 | return MeilisearchSettings(
321 | ranking_rules=["typo", "words"],
322 | searchable_attributes=["title", "overview"],
323 | sortable_attributes=["genre", "title"],
324 | typo_tolerance=TypoTolerance(enabled=False),
325 | faceting=Faceting(max_values_per_facet=123),
326 | pagination=Pagination(max_total_hits=17),
327 | separator_tokens=["&sep", "/", "|"],
328 | non_separator_tokens=["#", "@"],
329 | search_cutoff_ms=100,
330 | dictionary=["S.O", "S.O.S"],
331 | proximity_precision=ProximityPrecision.BY_ATTRIBUTE,
332 | facet_search=False,
333 | prefix_search="disabled",
334 | )
335 |
336 |
337 | @pytest.fixture
338 | def new_settings_localized():
339 | return MeilisearchSettings(
340 | ranking_rules=["typo", "words"],
341 | searchable_attributes=["title", "overview"],
342 | sortable_attributes=["genre", "title"],
343 | typo_tolerance=TypoTolerance(enabled=False),
344 | faceting=Faceting(max_values_per_facet=123),
345 | pagination=Pagination(max_total_hits=17),
346 | separator_tokens=["&sep", "/", "|"],
347 | non_separator_tokens=["#", "@"],
348 | search_cutoff_ms=100,
349 | dictionary=["S.O", "S.O.S"],
350 | proximity_precision=ProximityPrecision.BY_ATTRIBUTE,
351 | localized_attributes=[
352 | LocalizedAttributes(locales=["eng", "spa"], attribute_patterns=["*"]),
353 | LocalizedAttributes(locales=["ita"], attribute_patterns=["*_it"]),
354 | ],
355 | )
356 |
--------------------------------------------------------------------------------
/tests/test_decorators.py:
--------------------------------------------------------------------------------
1 | from uuid import uuid4
2 |
3 | import pytest
4 |
5 | from meilisearch_python_sdk.decorators import (
6 | ConnectionInfo,
7 | add_documents,
8 | async_add_documents,
9 | )
10 |
11 |
12 | @pytest.mark.parametrize("batch_size, primary_key", ((None, None), (10, "alternate")))
13 | def test_add_documents_with_client(batch_size, primary_key, client, ssl_verify):
14 | index_name = str(uuid4())
15 | documents = []
16 |
17 | for i in range(50):
18 | documents.append({"id": i, "alternate": i, "title": f"Title {i}"})
19 |
20 | @add_documents(
21 | index_name=index_name,
22 | connection_info=client,
23 | batch_size=batch_size,
24 | primary_key=primary_key,
25 | wait_for_task=True,
26 | verify=ssl_verify,
27 | )
28 | def tester():
29 | return documents
30 |
31 | tester()
32 |
33 | index = client.get_index(index_name)
34 |
35 | if primary_key:
36 | assert index.primary_key == primary_key
37 |
38 | result = index.get_documents(limit=50)
39 | assert result.results == documents
40 |
41 |
42 | @pytest.mark.parametrize("batch_size, primary_key", ((None, None), (10, "alternate")))
43 | def test_add_documents_with_connection_info(
44 | batch_size, primary_key, client, base_url, master_key, ssl_verify
45 | ):
46 | index_name = str(uuid4())
47 | documents = []
48 |
49 | for i in range(50):
50 | documents.append({"id": i, "alternate": i, "title": f"Title {i}"})
51 |
52 | @add_documents(
53 | index_name=index_name,
54 | connection_info=ConnectionInfo(url=base_url, api_key=master_key),
55 | batch_size=batch_size,
56 | primary_key=primary_key,
57 | wait_for_task=True,
58 | verify=ssl_verify,
59 | )
60 | def tester():
61 | return documents
62 |
63 | tester()
64 |
65 | index = client.get_index(index_name)
66 |
67 | if primary_key:
68 | assert index.primary_key == primary_key
69 |
70 | result = index.get_documents(limit=50)
71 | assert result.results == documents
72 |
73 |
74 | @pytest.mark.parametrize("batch_size, primary_key", ((None, None), (10, "alternate")))
75 | async def test_async_add_documents_with_client(batch_size, primary_key, async_client):
76 | index_name = str(uuid4())
77 | documents = []
78 |
79 | for i in range(50):
80 | documents.append({"id": i, "alternate": i, "title": f"Title {i}"})
81 |
82 | @async_add_documents(
83 | index_name=index_name,
84 | connection_info=async_client,
85 | batch_size=batch_size,
86 | primary_key=primary_key,
87 | wait_for_task=True,
88 | )
89 | async def tester():
90 | return documents
91 |
92 | await tester()
93 |
94 | index = await async_client.get_index(index_name)
95 |
96 | if primary_key:
97 | assert index.primary_key == primary_key
98 |
99 | result = await index.get_documents(limit=50)
100 |
101 |     # Order will be random since the documents were added asynchronously, so sort them first.
102 | assert sorted(result.results, key=lambda x: x["id"]) == documents
103 |
104 |
105 | @pytest.mark.parametrize("batch_size, primary_key", ((None, None), (10, "alternate")))
106 | async def test_async_add_documents_with_connection_info(
107 | batch_size, primary_key, async_client, base_url, master_key, ssl_verify
108 | ):
109 | index_name = str(uuid4())
110 | documents = []
111 |
112 | for i in range(50):
113 | documents.append({"id": i, "alternate": i, "title": f"Title {i}"})
114 |
115 | @async_add_documents(
116 | index_name=index_name,
117 | connection_info=ConnectionInfo(url=base_url, api_key=master_key),
118 | batch_size=batch_size,
119 | primary_key=primary_key,
120 | wait_for_task=True,
121 | verify=ssl_verify,
122 | )
123 | async def tester():
124 | return documents
125 |
126 | await tester()
127 |
128 | index = await async_client.get_index(index_name)
129 |
130 | if primary_key:
131 | assert index.primary_key == primary_key
132 |
133 | result = await index.get_documents(limit=50)
134 |
135 |     # Order will be random since the documents were added asynchronously, so sort them first.
136 | assert sorted(result.results, key=lambda x: x["id"]) == documents
137 |
--------------------------------------------------------------------------------
/tests/test_errors.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from httpx import AsyncClient as HttpxAsyncClient
3 | from httpx import Client as HttpxClient
4 | from httpx import HTTPStatusError, Request, Response
5 |
6 | from meilisearch_python_sdk.errors import (
7 | MeilisearchApiError,
8 | MeilisearchCommunicationError,
9 | MeilisearchError,
10 | MeilisearchTaskFailedError,
11 | MeilisearchTimeoutError,
12 | )
13 |
14 |
15 | def test_meilisearch_api_error():
16 | expected = "test"
17 | got = MeilisearchApiError(expected, Response(status_code=404))
18 |
19 | assert expected in str(got)
20 |
21 |
22 | def test_meilisearch_communication_error():
23 | expected = "test"
24 | got = MeilisearchCommunicationError(expected)
25 |
26 | assert expected in str(got)
27 |
28 |
29 | def test_meilisearch_error():
30 | expected = "test message"
31 | got = MeilisearchError(expected)
32 |
33 | assert expected in str(got)
34 |
35 |
36 | def test_meilisearch_task_failed_error():
37 | expected = "test message"
38 | got = MeilisearchTaskFailedError(expected)
39 |
40 | assert expected in str(got)
41 |
42 |
43 | def test_meilisearch_timeout_error():
44 | expected = "test"
45 | got = MeilisearchTimeoutError(expected)
46 |
47 | assert expected in str(got)
48 |
49 |
50 | async def test_non_json_error_async(async_index_with_documents, monkeypatch):
51 | async def mock_post_response(*args, **kwargs):
52 | return Response(
53 | status_code=504, text="test", request=Request("POST", url="http://localhost")
54 | )
55 |
56 | monkeypatch.setattr(HttpxAsyncClient, "post", mock_post_response)
57 | with pytest.raises(HTTPStatusError):
58 | await async_index_with_documents()
59 |
60 |
61 | def test_non_json_error(index_with_documents, monkeypatch):
62 | def mock_post_response(*args, **kwargs):
63 | return Response(
64 | status_code=504, text="test", request=Request("POST", url="http://localhost")
65 | )
66 |
67 | monkeypatch.setattr(HttpxClient, "post", mock_post_response)
68 | with pytest.raises(HTTPStatusError):
69 | index_with_documents()
70 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timezone
2 |
3 | import pytest
4 |
5 | from meilisearch_python_sdk._utils import iso_to_date_time
6 |
7 |
8 | @pytest.mark.parametrize(
9 | "iso_date, expected",
10 | (
11 | ("2021-05-11T03:12:22.563960100Z", datetime(2021, 5, 11, 3, 12, 22, 563960)),
12 | (
13 | "2021-05-11T03:12:22.563960100+00:00",
14 | datetime(2021, 5, 11, 3, 12, 22, 563960),
15 | ),
16 | (
17 | datetime(2021, 5, 11, 3, 12, 22, 563960),
18 | datetime(2021, 5, 11, 3, 12, 22, 563960),
19 | ),
20 | (
21 | datetime(2023, 7, 12, 1, 40, 11, 993699, tzinfo=timezone.utc),
22 | datetime(2023, 7, 12, 1, 40, 11, 993699, tzinfo=timezone.utc),
23 | ),
24 | (None, None),
25 | ),
26 | )
27 | def test_iso_to_date_time(iso_date, expected):
28 | converted = iso_to_date_time(iso_date)
29 |
30 | assert converted == expected
31 |
32 |
33 | def test_iso_to_date_time_invalid_format():
34 | with pytest.raises(ValueError):
35 | iso_to_date_time("2023-07-13T23:37:20Z")
36 |
--------------------------------------------------------------------------------
/tests/test_version.py:
--------------------------------------------------------------------------------
1 | from meilisearch_python_sdk import __version__
2 | from meilisearch_python_sdk._http_requests import user_agent
3 |
4 |
5 | def test_user_agent():
6 | assert user_agent() == f"Meilisearch Python SDK (v{__version__})"
7 |
--------------------------------------------------------------------------------