├── .github
├── release-drafter-config.yml
└── workflows
│ ├── code_quality.yml
│ ├── commitlint.yml
│ ├── release-drafter.yml
│ ├── release.yml
│ └── unittest.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── CHANGELOG.rst
├── CHANGELOG_EN.rst
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── README_ZH.md
├── codecov.yml
├── docs
├── Makefile
├── basic
│ ├── images
│ │ ├── 2-1-1.png
│ │ ├── 2-1-2.png
│ │ ├── 2-2-1.png
│ │ ├── 2-2-4.png
│ │ ├── 2-3-1.png
│ │ ├── 2-4-0.png
│ │ ├── 2-4-1.png
│ │ └── 2-5-1.png
│ └── readme.md
├── requirements.txt
└── source
│ ├── _static
│ └── custom.css
│ ├── _templates
│ └── layout.html
│ ├── advance_usage
│ ├── index.rst
│ └── store-configuration.rst
│ ├── api.rst
│ ├── benchmarks.rst
│ ├── changelog.rst
│ ├── conf.py
│ ├── index.rst
│ ├── installation.rst
│ └── quickstart
│ ├── context-manager.rst
│ ├── decorator.rst
│ ├── function-call.rst
│ ├── index.rst
│ ├── quota-configuration.rst
│ ├── specifying-algorithms.rst
│ ├── store-backends.rst
│ └── wait-retry.rst
├── examples
├── __init__.py
└── quickstart
│ ├── __init__.py
│ ├── async
│ ├── __init__.py
│ ├── context_manager_example.py
│ ├── decorator_example.py
│ ├── function_call_example.py
│ ├── memory_example.py
│ ├── quickstart_example.py
│ ├── redis_example.py
│ ├── using_algorithm_example.py
│ ├── wait_retry_concurrent_example.py
│ ├── wait_retry_example.py
│ └── wait_retry_function_call_example.py
│ ├── context_manager_example.py
│ ├── decorator_example.py
│ ├── function_call_example.py
│ ├── memory_example.py
│ ├── quickstart_example.py
│ ├── quota_example.py
│ ├── redis_example.py
│ ├── using_algorithm_example.py
│ ├── wait_retry_concurrent_example.py
│ ├── wait_retry_example.py
│ └── wait_retry_function_call_example.py
├── poetry.lock
├── pyproject.toml
├── tests
├── __init__.py
├── asyncio
│ ├── __init__.py
│ ├── benchmarks
│ │ ├── __init__.py
│ │ └── test_throttled.py
│ ├── conftest.py
│ ├── rate_limiter
│ │ ├── __init__.py
│ │ ├── test_fixed_window.py
│ │ ├── test_gcra.py
│ │ ├── test_leaking_bucket.py
│ │ ├── test_sliding_window.py
│ │ └── test_token_bucket.py
│ ├── store
│ │ ├── __init__.py
│ │ ├── test_memory.py
│ │ └── test_store.py
│ └── test_throttled.py
├── benchmarks
│ ├── __init__.py
│ └── test_throttled.py
├── conftest.py
├── rate_limiter
│ ├── __init__.py
│ ├── parametrizes.py
│ ├── test_base.py
│ ├── test_fixed_window.py
│ ├── test_gcra.py
│ ├── test_leaking_bucket.py
│ ├── test_sliding_window.py
│ └── test_token_bucket.py
├── store
│ ├── __init__.py
│ ├── parametrizes.py
│ ├── test_memory.py
│ ├── test_redis_pool.py
│ └── test_store.py
├── test_throttled.py
├── test_utils.py
└── utils.py
└── throttled
├── __init__.py
├── asyncio
├── __init__.py
├── rate_limiter
│ ├── __init__.py
│ ├── base.py
│ ├── fixed_window.py
│ ├── gcra.py
│ ├── leaking_bucket.py
│ ├── sliding_window.py
│ └── token_bucket.py
├── store
│ ├── __init__.py
│ ├── base.py
│ ├── memory.py
│ └── redis.py
└── throttled.py
├── constants.py
├── exceptions.py
├── rate_limiter
├── __init__.py
├── base.py
├── fixed_window.py
├── gcra.py
├── leaking_bucket.py
├── lua
│ ├── fixed_window.lua
│ ├── gcra.lua
│ ├── gcra_peek.lua
│ ├── leaking_bucket.lua
│ ├── sliding_window.lua
│ └── token_bucket.lua
├── sliding_window.py
└── token_bucket.py
├── store
├── __init__.py
├── base.py
├── memory.py
├── redis.py
└── redis_pool.py
├── throttled.py
├── types.py
└── utils.py
/.github/release-drafter-config.yml:
--------------------------------------------------------------------------------
1 | name-template: "v$RESOLVED_VERSION"
2 | tag-template: "v$RESOLVED_VERSION"
3 | change-template: "- $TITLE @$AUTHOR (#$NUMBER)"
4 | change-title-escapes: '\<*_&'
5 | category-template: "### $TITLE"
6 | version-resolver:
7 | major:
8 | labels:
9 | - "SemVer/MAJOR"
10 | - "kind/breaking-change"
11 | minor:
12 | labels:
13 | - "SemVer/MINOR"
14 | - "kind/feat"
15 | patch:
16 | labels:
17 | - "SemVer/PATCH"
18 | default: patch
19 |
20 | exclude-labels:
21 | - "ci/skip-changelog"
22 |
23 | autolabeler:
24 | - label: "kind/breaking-change"
25 | body: "/.*BREAKING CHANGE.*/"
26 | - label: "ci/skip-changelog"
27 | body: "/^docs: release.+/"
28 | - label: "kind/feat"
29 | title: "/^feat: .*/"
30 | - label: "kind/fix"
31 | title: "/^fix: .*/"
32 | - label: "kind/perf"
33 | title: "/^perf: .*/"
34 | - label: "kind/refactor"
35 | title: "/^refactor: .*/"
36 | - label: "kind/test"
37 | title: "/^test: .*/"
38 | - label: "kind/docs"
39 | title: "/^docs: .*/"
40 | - label: "kind/style"
41 | title: "/^style: .*/"
42 | - label: "kind/build"
43 | title: "/^build: .*/"
44 | - label: "kind/ci"
45 | title: "/^ci: .*/"
46 |
47 | categories:
48 | - title: "🔥 Breaking Changes"
49 | labels:
50 | - "kind/breaking-change"
51 | - title: "🐛 Bug Fixes"
52 | labels:
53 | - "kind/fix"
54 | - title: "🚀 New Features"
55 | labels:
56 | - "kind/feat"
57 | - title: "🧪 Tests"
58 | labels:
59 | - "kind/test"
60 | - title: "📝 Documentation"
61 | labels:
62 | - "kind/docs"
63 | - title: "✨ Improvements"
64 | labels:
65 | - "kind/perf"
66 | - "kind/refactor"
67 | - title: "📦 Dependencies"
68 | labels:
69 | - "kind/build"
70 | - title: "🍃 Maintenance"
71 | labels:
72 | - "kind/style"
73 | - "kind/ci"
74 |
75 | template: |
76 | ## v$RESOLVED_VERSION - YYYY-MM-DD
77 |
78 | [简体中文](https://github.com/$OWNER/$REPOSITORY/blob/main/CHANGELOG.rst#vxxx---YYYY-MM-DD) | English
79 |
80 | $CHANGES
81 |
82 | **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION
83 |
--------------------------------------------------------------------------------
/.github/workflows/code_quality.yml:
--------------------------------------------------------------------------------
1 | name: Code Quality
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request: {}
7 |
8 | # https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs
9 | # `contents` is for permission to the contents of the repository.
10 | # `pull-requests` is for permission to comment on pull requests.
11 | permissions:
12 | contents: write
13 | checks: write
14 | pull-requests: write
15 |
16 | jobs:
17 | build:
18 | runs-on: ubuntu-latest
19 | env:
20 | python-version: 3.8
21 | poetry-version: 1.8.5
22 | steps:
23 | - uses: actions/checkout@v4
24 | - uses: actions/setup-python@v5
25 | with:
26 | python-version: ${{ env.python-version }}
27 | - name: Install Poetry
28 | uses: abatilo/actions-poetry@v4
29 | with:
30 | poetry-version: ${{ env.poetry-version }}
31 | - name: Setup Poetry
32 | run: |
33 | poetry config virtualenvs.create true --local
34 | poetry config virtualenvs.in-project true --local
35 | - name: Define a cache for the virtual environment
36 | uses: actions/cache@v3
37 | with:
38 | path: ./.venv
39 | key: venv-${{ env.python-version }}-${{ env.poetry-version }}-${{ hashFiles('poetry.lock')}}
40 | - name: Define a cache for the pre-commit virtual environment
41 | uses: actions/cache@v3
42 | with:
43 | path: ~/.cache/pre-commit
44 | key: venv-precommit-${{ env.python-version }}-${{ env.poetry-version }}-${{ hashFiles('poetry.lock')}}
45 | - name: Install the project dependencies
46 | run: poetry install
47 | - name: Pre-commit run
48 | run: poetry run pre-commit run --show-diff-on-failure --color=always --all-files
49 | shell: bash
50 | - name: Run tests
51 | run: poetry run pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=throttled tests/ | tee pytest-coverage.txt
52 | - name: Pytest coverage comment
53 | uses: MishaKav/pytest-coverage-comment@main
54 | with:
55 | pytest-coverage-path: ./pytest-coverage.txt
56 | junitxml-path: ./pytest.xml
57 | github-token: ${{ secrets.PAT }}
58 |
--------------------------------------------------------------------------------
/.github/workflows/commitlint.yml:
--------------------------------------------------------------------------------
1 | name: Lint Commit Messages
2 | on: [pull_request]
3 |
4 | permissions:
5 | contents: read
6 | pull-requests: read
7 |
8 | jobs:
9 | commitlint:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 | - uses: wagoid/commitlint-github-action@v6
14 | with:
15 | token: ${{ secrets.PAT }}
16 |
--------------------------------------------------------------------------------
/.github/workflows/release-drafter.yml:
--------------------------------------------------------------------------------
1 | name: Release Drafter
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | pull_request:
9 | types: [opened, reopened, synchronize]
10 |
11 | pull_request_target:
12 | types: [opened, reopened, synchronize]
13 |
14 | permissions: {}
15 |
16 | jobs:
17 | update_release_draft:
18 | permissions:
19 | # write permission is required to create a GitHub release
20 | contents: write
21 | # write permission is required for autolabeler
22 | # otherwise, read permission is required at least
23 | pull-requests: write
24 | runs-on: ubuntu-latest
25 | steps:
26 |       # Drafts your next Release notes as Pull Requests are merged into "main"
27 | - uses: release-drafter/release-drafter@v6
28 | with:
29 | config-name: release-drafter-config.yml
30 | disable-autolabeler: false
31 | env:
32 | GITHUB_TOKEN: ${{ secrets.PAT }}
33 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Pypi
2 | on:
3 | push:
4 | tags:
5 | - "v[0-9]+.[0-9]+.[0-9]+"
6 |
7 | jobs:
8 | build:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v4
12 | - name: Build and publish to Pypi
13 | uses: JRubics/poetry-publish@v2.1
14 | with:
15 | python_version: "3.11.3"
16 | poetry_version: "==1.8.5"
17 | poetry_install_options: "--without dev"
18 | repository_name: "throttled-py"
19 | pypi_token: ${{ secrets.PYPI_TOKEN }}
20 |
--------------------------------------------------------------------------------
/.github/workflows/unittest.yml:
--------------------------------------------------------------------------------
1 | name: Unittest
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request: {}
7 |
8 | jobs:
9 | ci:
10 | strategy:
11 | fail-fast: false
12 | matrix:
13 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
14 | poetry-version: ["latest", "main", "1.8.5"]
15 | exclude:
16 | - python-version: "3.8"
17 | poetry-version: "main"
18 | env:
19 | MAINLINE_PYTHON_VERSION: "3.12"
20 | MAINLINE_POETRY_VERSION: "1.8.5"
21 | runs-on: ubuntu-latest
22 | steps:
23 | - uses: actions/checkout@v4
24 | - uses: actions/setup-python@v5
25 | with:
26 | python-version: ${{ matrix.python-version }}
27 | - name: Install Poetry
28 | uses: abatilo/actions-poetry@v4
29 | with:
30 | poetry-version: ${{ matrix.poetry-version }}
31 | - name: Setup Poetry
32 | run: |
33 | poetry config virtualenvs.create true --local
34 | poetry config virtualenvs.in-project true --local
35 | - name: Define a cache for the virtual environment
36 | uses: actions/cache@v3
37 | with:
38 | path: ./.venv
39 | key: venv-${{ matrix.python-version }}-${{ matrix.poetry-version }}-${{ hashFiles('poetry.lock')}}
40 | - name: Install the project dependencies
41 | run: poetry install
42 | - name: Run tests
43 | run: poetry run pytest --cov=throttled --cov-branch --cov-report=xml
44 | - name: Upload coverage reports to Codecov
45 | uses: codecov/codecov-action@v5
46 | with:
47 | token: ${{ secrets.CODECOV_TOKEN }}
48 | if: ${{ env.MAINLINE_PYTHON_VERSION == matrix.python-version && env.MAINLINE_POETRY_VERSION == matrix.poetry-version }}
49 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # UV
98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | #uv.lock
102 |
103 | # poetry
104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105 | # This is especially recommended for binary packages to ensure reproducibility, and is more
106 | # commonly ignored for libraries.
107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108 | #poetry.lock
109 |
110 | # pdm
111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | #pdm.lock
113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114 | # in version control.
115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116 | .pdm.toml
117 | .pdm-python
118 | .pdm-build/
119 |
120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121 | __pypackages__/
122 |
123 | # Celery stuff
124 | celerybeat-schedule
125 | celerybeat.pid
126 |
127 | # SageMath parsed files
128 | *.sage.py
129 |
130 | # Environments
131 | .env
132 | .venv
133 | env/
134 | venv/
135 | ENV/
136 | env.bak/
137 | venv.bak/
138 |
139 | # Spyder project settings
140 | .spyderproject
141 | .spyproject
142 |
143 | # Rope project settings
144 | .ropeproject
145 |
146 | # mkdocs documentation
147 | /site
148 |
149 | # mypy
150 | .mypy_cache/
151 | .dmypy.json
152 | dmypy.json
153 |
154 | # Pyre type checker
155 | .pyre/
156 |
157 | # pytype static type analyzer
158 | .pytype/
159 |
160 | # Cython debug symbols
161 | cython_debug/
162 |
163 | # PyCharm
164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166 | # and can be added to the global gitignore or merged into this file. For a more nuclear
167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168 | .idea/
169 |
170 | # PyPI configuration file
171 | .pypirc
172 | .vscode/
173 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | default_stages: [commit]
2 | repos:
3 | - repo: https://github.com/pre-commit/pre-commit-hooks
4 | rev: v3.4.0
5 | hooks:
6 | - id: check-merge-conflict
7 |
8 | - repo: https://github.com/pycqa/flake8
9 | rev: 7.0.0
10 | hooks:
11 | - id: flake8
12 | name: flake8[pyproject-flake8]
13 | language: python
14 | types: [python]
15 | entry: pflake8 --config=pyproject.toml
16 | additional_dependencies:
17 | # https://github.com/csachs/pyproject-flake8/issues/30
18 | - pyproject-flake8==7.0.0
19 | - flake8-typing-imports
20 |
21 | - repo: https://github.com/psf/black
22 | rev: 23.12.1
23 | hooks:
24 | - id: black
25 | name: black
26 | language: python
27 | types: [python]
28 | entry: black --config=pyproject.toml
29 |
30 | - repo: https://github.com/pycqa/isort
31 | rev: 5.13.2
32 | hooks:
33 | - id: isort
34 | name: isort
35 | language: python
36 | types: [python]
37 | entry: isort --settings-path=pyproject.toml
38 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | # Required
5 | version: 2
6 |
7 | # Set the OS, Python version, and other tools you might need
8 | build:
9 | os: ubuntu-24.04
10 | tools:
11 | python: "3.13"
12 |
13 | # Build documentation in the "docs/" directory with Sphinx
14 | sphinx:
15 | configuration: docs/source/conf.py
16 |
17 | # Optionally, but recommended,
18 | # declare the Python requirements required to build your documentation
19 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
20 | python:
21 | install:
22 | - requirements: docs/requirements.txt
23 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG PYTHON_VERSION=3.8.12
2 |
3 | FROM python:${PYTHON_VERSION}-slim-buster AS base
4 |
5 | ENV LC_ALL=C.UTF-8 \
6 | LANG=C.UTF-8
7 |
8 | ## PYTHON
9 | # Unbuffered stdout/stderr so logs appear immediately
10 | ENV PYTHONUNBUFFERED=1
11 | # Turns off writing .pyc files. Superfluous on an ephemeral container.
12 | ENV PYTHONDONTWRITEBYTECODE=1
13 |
14 | # Ensures that the python and pip executables used
15 | # in the image will be those from our virtualenv.
16 | ENV PATH="/venv/bin:$PATH"
17 |
18 | RUN set -ex && \
19 | chmod 1777 /tmp && \
20 | rm /etc/apt/sources.list && \
21 | echo "deb https://mirrors.cloud.tencent.com/debian buster main contrib non-free" >> /etc/apt/sources.list && \
22 | echo "deb https://mirrors.cloud.tencent.com/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \
23 | echo "deb-src https://mirrors.cloud.tencent.com/debian buster main contrib non-free" >> /etc/apt/sources.list && \
24 | echo "deb-src https://mirrors.cloud.tencent.com/debian buster-updates main contrib non-free" >> /etc/apt/sources.list
25 |
26 | RUN set -ex && mkdir ~/.pip && printf '[global]\nindex-url = https://mirrors.tencent.com/pypi/simple/' > ~/.pip/pip.conf
27 |
28 |
29 | FROM base AS builder
30 | ARG POETRY_VERSION=1.4.1
31 | ENV POETRY_VERSION=${POETRY_VERSION}
32 |
33 | WORKDIR /
34 |
35 | # Install OS package dependencies.
36 | # Do all of this in one RUN to limit final image size.
37 | RUN set -ex && \
38 | apt-get update && \
39 | apt-get install -y --no-install-recommends \
40 | gcc gettext && \
41 | rm -rf /var/lib/apt/lists/*
42 |
43 | COPY pyproject.toml /
44 | COPY poetry.lock /
45 |
46 | # Create a Python virtual environment and install dependencies
47 | RUN set -ex && \
48 | python -m venv /venv && \
49 | pip install --upgrade pip && \
50 | pip install poetry==${POETRY_VERSION} && \
51 | poetry config virtualenvs.create false
52 |
53 |
54 | RUN set -ex && \
55 | poetry install --no-root
56 |
57 | FROM base AS base-app
58 |
59 | # Install runtime dependencies
60 | RUN set -ex && \
61 | apt-get update && \
62 | apt-get install -y --no-install-recommends \
63 | gettext curl vim && \
64 | rm -rf /var/lib/apt/lists/*
65 |
66 | WORKDIR /app
67 | USER root
68 |
69 | ADD ./ ./
70 |
71 | # Copy build artifacts from the builder stage
72 | COPY --from=builder /venv /venv
73 |
74 |
75 | FROM base-app AS app
76 | ENTRYPOINT ["scripts/docker-entrypoint.sh"]
77 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 crayon
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Commands used by this Makefile
2 | POETRY ?= poetry
3 | PIP ?= pip
4 | TWINE ?= twine
5 | # 3.8.12
6 | # 3.9.13
7 | # 3.10.5
8 | # 3.11.10
9 | # 3.12.7
10 | PYTHON_VERSION ?= 3.12.7
11 | POETRY_VERSION ?= 1.8.5
12 | IMAGE_VERSION ?= "dev"
13 | IMAGE_REPO ?= "mirrors.tencent.com/throttled-py"
14 |
15 | # Install dependencies
16 | install:
17 | $(POETRY) install
18 |
19 | # Initialize the project
20 | init:
21 | $(PIP) install --upgrade pip
22 | $(PIP) install poetry==$(POETRY_VERSION)
23 | $(POETRY) install
24 | pre-commit install
25 | pre-commit install --hook-type commit-msg
26 |
27 | # Build distribution packages
28 | build:
29 | $(POETRY) build
30 |
31 | # Generate setup.py
32 | setup_py:
33 | $(POETRY) run poetry-setup
34 |
35 | # Upload to PyPI
36 | upload:
37 | $(TWINE) upload dist/*
38 |
39 | # Upload to the PyPI test environment
40 | upload_test:
41 | $(TWINE) upload --repository-url https://test.pypi.org/legacy/ dist/*
42 |
43 | # Default target: install dependencies, build, and upload to PyPI
44 | .PHONY: default
45 | default: install build upload
46 |
47 | docker-build-local:
48 | docker build -t ${IMAGE_REPO}:${IMAGE_VERSION}-${PYTHON_VERSION} \
49 | --build-arg PYTHON_VERSION=${PYTHON_VERSION} \
50 | --build-arg POETRY_VERSION=${POETRY_VERSION} .
51 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | # refer: https://docs.codecov.com/docs/commit-status
2 | coverage:
3 | status:
4 | project:
5 | default:
6 | target: 85%
7 | patch:
8 | default:
9 | target: 65%
10 | ignore:
11 | - "**/types.py"
12 |
13 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/basic/images/2-1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-1-1.png
--------------------------------------------------------------------------------
/docs/basic/images/2-1-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-1-2.png
--------------------------------------------------------------------------------
/docs/basic/images/2-2-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-2-1.png
--------------------------------------------------------------------------------
/docs/basic/images/2-2-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-2-4.png
--------------------------------------------------------------------------------
/docs/basic/images/2-3-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-3-1.png
--------------------------------------------------------------------------------
/docs/basic/images/2-4-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-4-0.png
--------------------------------------------------------------------------------
/docs/basic/images/2-4-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-4-1.png
--------------------------------------------------------------------------------
/docs/basic/images/2-5-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/docs/basic/images/2-5-1.png
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx-book-theme
3 | sphinx_thebe
4 | sphinx-inline-tabs
5 | sphinx-copybutton
6 | sphinx-autodoc-typehints
7 |
--------------------------------------------------------------------------------
/docs/source/_static/custom.css:
--------------------------------------------------------------------------------
1 | /* maple-mono-latin-400-normal */
2 | @font-face {
3 | font-family: 'Maple Mono';
4 | font-style: normal;
5 | font-display: swap;
6 | font-weight: 400;
7 | src: url(https://cdn.jsdelivr.net/fontsource/fonts/maple-mono@latest/latin-400-normal.woff2) format('woff2'),
8 | url(https://cdn.jsdelivr.net/fontsource/fonts/maple-mono@latest/latin-400-normal.woff) format('woff');
9 | }
10 |
11 | /* maple-mono-latin-700-normal (for bold) */
12 | @font-face {
13 | font-family: 'Maple Mono';
14 | font-style: normal;
15 | font-display: swap;
16 | font-weight: 700;
17 | src: url(https://cdn.jsdelivr.net/fontsource/fonts/maple-mono@latest/latin-700-normal.woff2) format('woff2'),
18 | url(https://cdn.jsdelivr.net/fontsource/fonts/maple-mono@latest/latin-700-normal.woff) format('woff');
19 | }
20 |
21 | * {
22 | font-family: 'Maple Mono', monospace;
23 | }
24 |
25 | pre, code, h1, h2, h3, h4, h5, h6 {
26 | font-family: 'Maple Mono', monospace !important;
27 | /*line-height: 1;*/
28 | }
29 |
30 |
31 | pre, code, body, p {
32 | font-size: 14px;
33 | }
34 |
35 | h1 {
36 | font-size: 1.6rem;
37 | }
38 |
39 | h2 {
40 | font-size: 1.4rem;
41 | }
42 |
43 | h3 {
44 | font-size: 1.2rem;
45 | }
46 |
47 | h4 {
48 | font-size: 1rem;
49 | }
50 |
51 | h3, h4, h5, h6 {
52 | margin: 0.9rem 0 0.9rem;
53 | }
54 |
55 | h1, h2 {
56 | margin: 1.2rem 0 1.2rem;
57 | }
58 |
59 | pre, code, .tab-content, .container, .highlight, .docutils.container {
60 | width: 100%;
61 | max-width: 100%;
62 | box-sizing: border-box;
63 | overflow-x: auto;
64 | }
65 |
66 | /* Remove the default margin from the class method blocks */
67 | dl[class]:not(.option-list,.field-list,.footnote,.glossary,.simple) {
68 | margin-bottom: 1rem;
69 | }
70 |
71 | .title {
72 | font-family: 'Maple Mono', "Fallback Outline", monospace !important;
73 | display: inline-flex;
74 | align-items: center;
75 | font-size: 1.8rem !important;
76 | font-style: italic;
77 | font-weight: 700;
78 | font-variation-settings: "ital" 1;
79 | }
80 |
81 | /* Improve bold text visibility in dark mode */
82 | @media (prefers-color-scheme: dark) {
83 | html[data-theme="dark"] strong,
84 | html[data-theme="dark"] b {
85 | /* Use a slightly lighter color for bold text */
86 | color: #EBCB8B; /* Or another light color like #e0e0e0 */
87 | }
88 | }
89 |
90 | /* Fallback for themes that don't use data-theme but might use a class */
91 | .dark-theme strong,
92 | .dark-theme b {
93 | color: #EBCB8B;
94 | }
95 |
--------------------------------------------------------------------------------
/docs/source/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {# Import the theme's layout. #}
2 | {% extends "!layout.html" %}
3 |
4 |
5 |
6 | {# Custom CSS overrides #}
7 | {% set bootswatch_css_custom = ['_static/custom.css'] %}
8 |
--------------------------------------------------------------------------------
/docs/source/advance_usage/index.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Advanced Usage
3 | =================
4 |
5 | Contents
6 | =================
7 |
8 | .. toctree::
9 | :maxdepth: 2
10 |
11 | store-configuration
12 |
--------------------------------------------------------------------------------
/docs/source/api.rst:
--------------------------------------------------------------------------------
1 | =================
2 | API Reference
3 | =================
4 |
5 | .. module:: throttled
6 |
7 |
8 | Main Interface
9 | =================
10 |
11 | .. autoclass:: throttled.Throttled
12 | :inherited-members:
13 | :members:
14 | :special-members: __init__, __call__, __enter__
15 | :exclude-members: limiter
16 |
17 |
18 | Store
19 | =================
20 |
21 | .. autoclass:: throttled.store.BaseStore
22 |
23 |
24 | .. autoclass:: throttled.store.MemoryStore
25 | :special-members: __init__
26 | :show-inheritance:
27 |
28 | .. autoclass:: throttled.store.RedisStore
29 | :special-members: __init__
30 | :show-inheritance:
31 |
32 | Rate Limiting
33 | =================
34 |
35 | .. autoclass:: throttled.rate_limiter.Rate
36 | :members:
37 | :undoc-members:
38 |
39 | .. autoclass:: throttled.rate_limiter.Quota
40 | :members:
41 | :undoc-members:
42 | :exclude-members: period_sec, emission_interval, fill_rate, get_period_sec, get_limit
43 |
44 | .. autofunction:: throttled.rate_limiter.per_sec
45 |
46 | .. autofunction:: throttled.rate_limiter.per_min
47 |
48 | .. autofunction:: throttled.rate_limiter.per_hour
49 |
50 | .. autofunction:: throttled.rate_limiter.per_day
51 |
52 | .. autofunction:: throttled.rate_limiter.per_week
53 |
54 | .. autofunction:: throttled.rate_limiter.per_duration
55 |
56 |
57 | Exceptions
58 | ====================
59 |
60 | All exceptions inherit from :class:`throttled.exceptions.BaseThrottledError`.
61 |
62 | .. autoexception:: throttled.exceptions.BaseThrottledError
63 | .. autoexception:: throttled.exceptions.SetUpError
64 | .. autoexception:: throttled.exceptions.DataError
65 | .. autoexception:: throttled.exceptions.StoreUnavailableError
66 | .. autoexception:: throttled.exceptions.LimitedError
67 | :members:
68 |
69 |
70 | Lower-Level Classes
71 | ====================
72 |
73 | .. autoclass:: throttled.RateLimiterType
74 | :members:
75 | :undoc-members:
76 | :exclude-members: choice
77 |
78 | .. autoclass:: throttled.RateLimitResult
79 | :members:
80 | :undoc-members:
81 |
82 | .. autoclass:: throttled.RateLimitState
83 | :members:
84 | :undoc-members:
85 |
--------------------------------------------------------------------------------
/docs/source/benchmarks.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Benchmarks
3 | =================
4 |
5 | 1) Test Environment
6 | ====================
7 |
8 | - **Python Version**: 3.13.1 (CPython implementation)
9 | - **Operating System**: macOS Darwin 23.6.0 (ARM64 architecture)
10 | - **Redis Version**: 7.x (local connection)
11 |
12 | 2) Performance Metrics
13 | =======================
14 |
.. note:: Throughput is measured in req/s, latency in ms/op.
16 |
17 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
18 | | Algorithm Type | In-Memory (Single-thread) | In-Memory (16 threads) | Redis (Single-thread)| Redis (16 threads) |
19 | +====================+===========================+============================+======================+======================+
20 | | **Baseline** *[1]* | **1,692,307 / 0.0002** | **135,018 / 0.0004** *[2]* | **17,324 / 0.0571** | **16,803 / 0.9478** |
21 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
22 | | Fixed Window | 369,635 / 0.0023 | 57,275 / 0.2533 | 16,233 / 0.0610 | 15,835 / 1.0070 |
23 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
24 | | Sliding Window | 265,215 / 0.0034 | 49,721 / 0.2996 | 12,605 / 0.0786 | 13,371 / 1.1923 |
25 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
26 | | Token Bucket | 365,678 / 0.0023 | 54,597 / 0.2821 | 13,643 / 0.0727 | 13,219 / 1.2057 |
27 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
28 | | Leaky Bucket | 364,296 / 0.0023 | 54,136 / 0.2887 | 13,628 / 0.0727 | 12,579 / 1.2667 |
29 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
30 | | GCRA | 373,906 / 0.0023 | 53,994 / 0.2895 | 12,901 / 0.0769 | 12,861 / 1.2391 |
31 | +--------------------+---------------------------+----------------------------+----------------------+----------------------+
32 |
33 | * *[1] Baseline: In-Memory -* ``dict[key] += 1``, *Redis -* ``INCRBY key increment``
34 | * *[2] In-Memory concurrent baseline uses* ``threading.RLock`` *for thread safety*
35 | * *[3] Performance: In-Memory - ~2.5-4.5x* ``dict[key] += 1`` *operations, Redis - ~1.06-1.37x* ``INCRBY key increment`` *operations*
36 | * *[4] Benchmark code:* `tests/benchmarks/test_throttled.py `_
37 |
--------------------------------------------------------------------------------
/docs/source/changelog.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../../CHANGELOG_EN.rst
2 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information


import os
import sys
from datetime import datetime

# Add the project root and the current directory to sys.path so that
# ``throttled`` can be imported below without installing the package first.
sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath("./"))


import throttled  # noqa: E402

# Derive the documented version directly from the package to avoid drift.
version = throttled.__version__
release = throttled.__version__
project = "throttled-py"
author = "ZhuoZhuoCrayon"
copyright = f"{datetime.now().year}, Crayon"
description = (
    "🔧 High-performance Python rate limiting library "
    "with multiple algorithms(Fixed Window, Sliding Window, Token Bucket, "
    "Leaky Bucket & GCRA) and storage backends (Redis, In-Memory)."
)

# Browser tab / page title, e.g. "throttled-py v2.x.x".
html_title = f"{project} v{version}"

# Options for the sphinx_book_theme (repository buttons, TOC depth, etc.).
html_theme_options = {
    "repository_url": "https://github.com/ZhuoZhuoCrayon/throttled-py",
    "use_edit_page_button": False,
    "use_source_button": False,
    "use_issues_button": True,
    "use_repository_button": True,
    "use_download_button": False,
    "use_sidenotes": True,
    "show_toc_level": 2,
}

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "sphinx.ext.autodoc",
    "sphinx_autodoc_typehints",
    "sphinx_thebe",
    "sphinx_inline_tabs",
    "sphinx_copybutton",
]

templates_path = ["_templates"]
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

# Use the Sphinx Book Theme: https://github.com/executablebooks/sphinx-book-theme
html_theme = "sphinx_book_theme"
# Set the path to the static files directory
html_static_path = ["_static"]
# Add custom CSS files
html_css_files = [
    "custom.css",
]
76 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | =================
2 | *throttled-py*
3 | =================
4 |
5 | .. container:: badges
6 |
7 | .. image:: https://img.shields.io/badge/python-%3E%3D3.8-green?style=for-the-badge&logo=python
8 | :target: https://github.com/ZhuoZhuoCrayon/throttled-py
9 | :class: header-badge
10 | .. image:: https://img.shields.io/codecov/c/github/ZhuoZhuoCrayon/throttled-py?logo=codecov&style=for-the-badge
11 | :target: https://app.codecov.io/gh/ZhuoZhuoCrayon/throttled-py
12 | :class: header-badge
13 | .. image:: https://img.shields.io/pypi/v/throttled-py?&color=blue&style=for-the-badge&logo=python
14 | :target: https://pypi.org/project/throttled-py/
15 | :class: header-badge
16 | .. image:: https://img.shields.io/badge/issue-welcome-green?style=for-the-badge&logo=github
       :target: https://github.com/ZhuoZhuoCrayon/throttled-py/issues
18 | :class: header-badge
19 |
20 |
21 | Introduction
22 | =================
23 |
24 | *throttled-py* is a high-performance Python rate limiting library with
multiple algorithms (Fixed Window, Sliding Window, Token Bucket, Leaky Bucket & GCRA)
26 | and storage backends (Redis, In-Memory).
27 |
28 |
29 | Features
30 | =================
31 |
32 | * Supports both synchronous and `asynchronous `_ (``async / await``).
33 | * Provides thread-safe storage backends: `Redis `_, `In-Memory (with support for key expiration and eviction) `_.
34 | * Supports multiple rate limiting algorithms: `Fixed Window `_, `Sliding Window `_, `Token Bucket `_, `Leaky Bucket `_ & `Generic Cell Rate Algorithm (GCRA) `_.
35 | * Supports `configuration of rate limiting algorithms `_ and provides flexible `quota configuration `_.
36 | * Supports immediate response and `wait-retry `_ modes, and provides `function call `_, `decorator `_, and `context manager `_ modes.
37 | * Supports integration with the `MCP `_ `Python SDK `_ to provide rate limiting support for model dialog processes.
38 | * Excellent performance, The execution time for a single rate limiting API call is equivalent to (see `Benchmarks `_ for details):
39 |
40 | * In-Memory: ~2.5-4.5x ``dict[key] += 1`` operations.
41 | * Redis: ~1.06-1.37x ``INCRBY key increment`` operations.
42 |
43 | Contents
44 | =================
45 |
46 | .. toctree::
47 | :maxdepth: 3
48 | :titlesonly:
49 |
50 | installation
51 | quickstart/index
52 | advance_usage/index
53 | changelog
54 | benchmarks
55 | api
56 |
--------------------------------------------------------------------------------
/docs/source/installation.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Installation
3 | =================
4 |
5 | Install the package with pip:
6 |
7 | .. code-block::
8 |
9 | $ pip install throttled-py
10 |
11 |
12 | 1) Optional Dependencies
13 | =========================
14 |
15 | Starting from `v2.0.0 `_,
only core dependencies (``in-memory``) are installed by default.
17 |
18 | To enable additional features, install optional dependencies as follows (multiple extras can
19 | be comma-separated):
20 |
21 | .. code-block:: shell
22 |
23 | $ pip install "throttled-py[redis]"
24 | $ pip install "throttled-py[redis,in-memory]"
25 |
26 |
27 | 2) Extras
28 | ==========
29 |
30 | +--------------+-----------------------------------+
31 | | Extra | Description |
32 | +==============+===================================+
33 | | ``all`` | Install all extras. |
34 | +--------------+-----------------------------------+
35 | | ``in-memory``| Use In-Memory as storage backend. |
36 | +--------------+-----------------------------------+
37 | | ``redis`` | Use Redis as storage backend. |
38 | +--------------+-----------------------------------+
39 |
40 |
--------------------------------------------------------------------------------
/docs/source/quickstart/context-manager.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Context Manager
3 | =================
4 |
5 | You can use the context manager to limit the code block.
6 | When access is allowed, return :class:`RateLimitResult `.
7 |
8 | If the limit is exceeded or the retry timeout is exceeded,
9 | it will raise :class:`LimitedError `.
10 |
11 | .. tab:: Sync
12 |
13 | .. literalinclude:: ../../../examples/quickstart/context_manager_example.py
14 | :language: python
15 |
16 |
17 | .. tab:: Async
18 |
19 | .. literalinclude:: ../../../examples/quickstart/async/context_manager_example.py
20 | :language: python
21 |
--------------------------------------------------------------------------------
/docs/source/quickstart/decorator.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Decorator
3 | =================
4 |
5 | You can use :class:`Throttled ` as a decorator, and
6 | the :py:meth:`limit ` method will check if
7 | the request is allowed before the wrapped function call.
8 |
9 | If the request is not allowed, it will raise :class:`LimitedError `.
10 |
11 | .. tab:: Sync
12 |
13 | .. literalinclude:: ../../../examples/quickstart/decorator_example.py
14 | :language: python
15 |
16 |
17 | .. tab:: Async
18 |
19 | .. literalinclude:: ../../../examples/quickstart/async/decorator_example.py
20 | :language: python
21 |
--------------------------------------------------------------------------------
/docs/source/quickstart/function-call.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Function Call
3 | =================
4 |
5 | Using :class:`Throttled ` to check if a request is allowed is very simple.
6 |
7 | You just need to call the :py:meth:`Throttled.limit ` method and pass in the specified ``key``,
8 | which will return a :class:`RateLimitResult ` object.
9 |
10 | **It is important to note that** :py:meth:`Throttled.limit `
11 | **does not raise any exceptions**, you can determine whether the request is allowed by checking the
12 | :py:attr:`RateLimitResult.limited ` attribute.
13 |
14 | You can also get a snapshot of the Throttled state after calling :py:meth:`Throttled.limit `
15 | through the :py:attr:`RateLimitResult.state ` attribute.
16 |
17 | If you just want to check the latest state of :class:`Throttled ` without deducting requests,
18 | you can use the :py:meth:`Throttled.peek ` method,
19 | which will also return a :class:`RateLimitState ` object.
20 |
21 | The following example will guide you through the basic usage of :class:`Throttled `:
22 |
23 | .. tab:: Sync
24 |
25 | .. literalinclude:: ../../../examples/quickstart/function_call_example.py
26 | :language: python
27 |
28 |
29 | .. tab:: Async
30 |
31 | .. literalinclude:: ../../../examples/quickstart/async/function_call_example.py
32 | :language: python
33 |
--------------------------------------------------------------------------------
/docs/source/quickstart/index.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Quick Start
3 | =================
4 |
5 | 1) Core API
6 | =================
7 |
8 | * :py:meth:`Throttled.limit `: Deduct requests and return :class:`RateLimitResult ` object.
9 |
10 | * :py:meth:`Throttled.peek `: Check current rate limit state for a key and return :class:`RateLimitState ` object.
11 |
12 |
13 | 2) Async Support
14 | =================
15 |
16 | The core API is the same for synchronous and asynchronous code.
17 | Just replace ``from throttled import ...`` with ``from throttled.asyncio import ...`` in your code.
18 |
19 |
20 | 3) Example
21 | =================
22 |
23 | .. tab:: Sync
24 |
25 | .. literalinclude:: ../../../examples/quickstart/quickstart_example.py
26 | :language: python
27 |
28 |
29 | .. tab:: Async
30 |
31 | .. literalinclude:: ../../../examples/quickstart/async/quickstart_example.py
32 | :language: python
33 |
34 |
35 | 4) Contents
36 | =================
37 |
38 | .. toctree::
39 | :maxdepth: 2
40 |
41 | function-call
42 | decorator
43 | context-manager
44 | wait-retry
45 | store-backends
46 | specifying-algorithms
47 | quota-configuration
48 |
--------------------------------------------------------------------------------
/docs/source/quickstart/quota-configuration.rst:
--------------------------------------------------------------------------------
1 | ======================
2 | Quota Configuration
3 | ======================
4 |
5 | :class:`Quota ` represents the rules for rate limiting.
6 |
7 |
8 | 1) Quick Setup
9 | =======================
10 |
11 | ``throttled-py`` provides quick functions to configure common time-based :class:`Quota `.
12 |
13 | .. tab:: Sync
14 |
15 | .. code-block:: python
16 |
17 | from throttled import rate_limiter
18 |
19 | rate_limiter.per_sec(60) # 60 req/sec
20 | rate_limiter.per_min(60) # 60 req/min
21 | rate_limiter.per_hour(60) # 60 req/hour
22 | rate_limiter.per_day(60) # 60 req/day
23 | rate_limiter.per_week(60) # 60 req/week
24 |
25 |
26 | .. tab:: Async
27 |
28 | .. code-block:: python
29 |
30 | from throttled.asyncio import rate_limiter
31 |
32 | rate_limiter.per_sec(60) # 60 req/sec
33 | rate_limiter.per_min(60) # 60 req/min
34 | rate_limiter.per_hour(60) # 60 req/hour
35 | rate_limiter.per_day(60) # 60 req/day
36 | rate_limiter.per_week(60) # 60 req/week
37 |
38 |
39 | 2) Custom Quota
40 | ===================
41 |
42 | If the quick configuration does not meet your needs, you can customize the :class:`Quota `
43 | through the :py:meth:`per_duration ` method:
44 |
45 | .. tab:: Sync
46 |
47 | .. code-block:: python
48 |
49 | from datetime import timedelta
50 | from throttled import rate_limiter
51 |
52 | # A total of 120 requests are allowed in two minutes, and a burst of 150 requests is allowed.
53 | rate_limiter.per_duration(timedelta(minutes=2), limit=120, burst=150)
54 |
55 |
56 | .. tab:: Async
57 |
58 | .. code-block:: python
59 |
60 | from datetime import timedelta
61 | from throttled.asyncio import rate_limiter
62 |
63 | # A total of 120 requests are allowed in two minutes, and a burst of 150 requests is allowed.
64 | rate_limiter.per_duration(timedelta(minutes=2), limit=120, burst=150)
65 |
66 |
67 | 3) Burst Capacity
68 | ===================
69 |
70 | The :py:attr:`burst ` argument can be used to adjust the ability of
71 | the throttling object to handle burst traffic.
72 |
73 | This is valid for the following algorithms:
74 |
75 | * ``TOKEN_BUCKET``
76 | * ``LEAKING_BUCKET``
77 | * ``GCRA``
78 |
79 | .. tab:: Sync
80 |
81 | .. code-block:: python
82 |
83 | from throttled import rate_limiter
84 |
85 | # Allow 120 burst requests.
86 | # When burst is not specified, the default setting is the limit passed in.
87 | rate_limiter.per_min(60, burst=120)
88 |
89 |
90 | .. tab:: Async
91 |
92 | .. code-block:: python
93 |
94 | from throttled.asyncio import rate_limiter
95 |
96 | # Allow 120 burst requests.
97 | # When burst is not specified, the default setting is the limit passed in.
98 | rate_limiter.per_min(60, burst=120)
99 |
--------------------------------------------------------------------------------
/docs/source/quickstart/specifying-algorithms.rst:
--------------------------------------------------------------------------------
1 | ======================
2 | Specifying Algorithms
3 | ======================
4 |
5 |
The rate limiting algorithm is specified by the ``using`` parameter of :class:`Throttled `.
7 |
8 | The supported algorithms are as follows:
9 |
10 | * `Fixed Window `_ : ``RateLimiterType.FIXED_WINDOW.value``
11 | * `Sliding Window `_: ``RateLimiterType.SLIDING_WINDOW.value``
12 | * `Token Bucket `_: ``RateLimiterType.TOKEN_BUCKET.value``
13 | * `Leaky Bucket `_: ``RateLimiterType.LEAKING_BUCKET.value``
14 | * `Generic Cell Rate Algorithm, GCRA `_: ``RateLimiterType.GCRA.value``
15 |
16 | .. tab:: Sync
17 |
18 | .. literalinclude:: ../../../examples/quickstart/using_algorithm_example.py
19 | :language: python
20 |
21 |
22 | .. tab:: Async
23 |
24 | .. literalinclude:: ../../../examples/quickstart/async/using_algorithm_example.py
25 | :language: python
26 |
--------------------------------------------------------------------------------
/docs/source/quickstart/store-backends.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Store Backends
3 | =================
4 |
5 | .. _store-backends-in-memory:
6 |
7 | 1) In-Memory
8 | =================
9 |
10 | :class:`MemoryStore ` is essentially a memory-based
11 | `LRU Cache `_ with expiration time, it is thread-safe and
12 | can be used for rate limiting in a single process.
13 |
14 | By default, :class:`Throttled ` will initialize a global
15 | :class:`MemoryStore ` instance with maximum capacity of 1024,
16 | so **you don't usually need to create it manually.**
17 |
18 | Also note that ``throttled.store.MemoryStore`` and ``throttled.asyncio.store.MemoryStore`` are implemented based on
19 | ``threading.RLock`` and ``asyncio.Lock`` respectively, so the global instance is also independent
20 | for synchronous and asynchronous usage.
21 |
22 | Different instances mean different storage spaces, if you want to limit the same key in different places
23 | in your program, **make sure that** :class:`Throttled ` **receives the same**
24 | :class:`MemoryStore ` **instance** and uses the same
25 | :class:`Quota ` configuration.
26 |
27 | The following example uses :class:`MemoryStore ` as the storage backend and
28 | throttles the same Key on ping and pong:
29 |
30 | .. tab:: Sync
31 |
32 | .. literalinclude:: ../../../examples/quickstart/memory_example.py
33 | :language: python
34 |
35 |
36 | .. tab:: Async
37 |
38 | .. literalinclude:: ../../../examples/quickstart/async/memory_example.py
39 | :language: python
40 |
41 |
42 | .. _store-backends-redis:
43 |
44 | 2) Redis
45 | =================
46 |
47 | :class:`RedisStore ` is implemented based on `redis-py `_,
48 | you can use it for rate limiting in a distributed environment.
49 |
50 | It supports the following arguments:
51 |
52 | * ``server``: `Standard Redis URL `_.
53 |
54 | * ``options``: Redis connection configuration, supports all configuration items
55 | of `redis-py `_, see :ref:`RedisStore Options `.
56 |
57 | The following example uses :class:`RedisStore ` as the storage backend:
58 |
59 | .. tab:: Sync
60 |
61 | .. literalinclude:: ../../../examples/quickstart/redis_example.py
62 | :language: python
63 |
64 |
65 | .. tab:: Async
66 |
67 | .. literalinclude:: ../../../examples/quickstart/async/redis_example.py
68 | :language: python
69 |
--------------------------------------------------------------------------------
/docs/source/quickstart/wait-retry.rst:
--------------------------------------------------------------------------------
1 | =================
2 | Wait & Retry
3 | =================
4 |
5 | By default, :class:`Throttled ` returns :class:`RateLimitResult ` immediately.
6 |
7 | To enable wait-and-retry behavior, you can use the ``timeout`` parameter.
8 |
9 | :class:`Throttled ` will wait according to the
10 | :py:attr:`RateLimitState.retry_after ` and retry automatically.
11 |
The :doc:`Function Call ` mode will return
the last retried :class:`RateLimitResult `:
14 |
15 | .. tab:: Sync
16 |
17 | .. literalinclude:: ../../../examples/quickstart/wait_retry_function_call_example.py
18 | :language: python
19 |
20 |
21 | .. tab:: Async
22 |
23 | .. literalinclude:: ../../../examples/quickstart/async/wait_retry_function_call_example.py
24 | :language: python
25 |
26 |
27 |
28 | In the :doc:`Decorator ` and :doc:`Context Manager ` modes,
29 | :class:`LimitedError ` will be raised if the request is not allowed after the timeout:
30 |
31 | .. tab:: Sync
32 |
33 | .. literalinclude:: ../../../examples/quickstart/wait_retry_example.py
34 | :language: python
35 |
36 |
37 | .. tab:: Async
38 |
39 | .. literalinclude:: ../../../examples/quickstart/async/wait_retry_example.py
40 | :language: python
41 |
In the above example, ``per_sec(2, burst=2)`` allows 2 requests per second with a burst
capacity of 2 (the bucket's capacity). In other words, :class:`Throttled ` consumes
the entire burst after the first 2 requests. If ``timeout>=0.5`` is set, the above example
will complete all requests in 1.5 seconds (the burst is consumed immediately, and the
remaining 3 requests are refilled over the subsequent 1.5s):
46 |
47 | .. code-block::
48 |
49 | ------------- Burst---------------------
50 | Request 1 completed at 0.00s
51 | Request 2 completed at 0.00s
52 | ----------------------------------------
53 | -- Refill: 0.5 tokens per second -------
54 | Request 3 completed at 0.50s
55 | Request 4 completed at 1.00s
56 | Request 5 completed at 1.50s
57 | -----------------------------------------
58 | Total time for 5 requests at 2/sec: 1.50s
59 |
60 |
61 | ``Wait & Retry`` is most effective for smoothing out request rates, and you can feel its effect
62 | through the following example:
63 |
64 |
65 | .. tab:: Sync
66 |
67 | .. literalinclude:: ../../../examples/quickstart/wait_retry_concurrent_example.py
68 | :language: python
69 |
70 |
71 | .. tab:: Async
72 |
73 | .. literalinclude:: ../../../examples/quickstart/async/wait_retry_concurrent_example.py
74 | :language: python
75 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/examples/__init__.py
--------------------------------------------------------------------------------
/examples/quickstart/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/examples/quickstart/__init__.py
--------------------------------------------------------------------------------
/examples/quickstart/async/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/examples/quickstart/async/__init__.py
--------------------------------------------------------------------------------
/examples/quickstart/async/context_manager_example.py:
--------------------------------------------------------------------------------
import asyncio

from throttled.asyncio import Throttled, exceptions, rate_limiter


async def call_api():
    print("doing something...")


async def main():
    # Allow a single request per minute for the given key.
    limiter: Throttled = Throttled(key="/api/v1/users/", quota=rate_limiter.per_min(1))
    async with limiter as result:
        # The first entry consumes the only available request, so it passes.
        assert not result.limited
        # Snapshot of the limiter right after the deduction:
        # >> RateLimitState(limit=1, remaining=0, reset_after=60, retry_after=0)
        print(result.state)

        await call_api()

    try:
        # Entering again exceeds the quota, so the context manager raises.
        async with limiter:
            await call_api()
    except exceptions.LimitedError as exc:
        # >> Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
        print(exc)


if __name__ == "__main__":
    asyncio.run(main())
31 |
--------------------------------------------------------------------------------
/examples/quickstart/async/decorator_example.py:
--------------------------------------------------------------------------------
import asyncio

from throttled.asyncio import Throttled, exceptions, rate_limiter

# A single Quota shared by both decorated functions below, so both calls
# draw from the same 2-requests-per-minute budget for the "/ping" key.
quota = rate_limiter.per_min(2)


# Create a rate limiter that allows 2 requests per minute.
@Throttled(key="/ping", quota=quota)
async def ping() -> str:
    return "ping"


# Same key and quota, but each call consumes 2 tokens instead of 1.
@Throttled(key="/ping", quota=quota, cost=2)
async def heavy_ping() -> str:
    return "heavy_pong"


async def main():
    # The first call should succeed.
    # >> ping
    print(await ping())

    try:
        # The second call will be rate limited, because heavy_ping consumes 2 Tokens
        # and 1 Token has been consumed by the first call.
        await heavy_ping()
    except exceptions.LimitedError as exc:
        # >> Rate limit exceeded: remaining=1, reset_after=30, retry_after=60.
        print(exc)


if __name__ == "__main__":
    asyncio.run(main())
36 |
--------------------------------------------------------------------------------
/examples/quickstart/async/function_call_example.py:
--------------------------------------------------------------------------------
import asyncio

from throttled.asyncio import Throttled


async def main():
    # By Default, it initializes a rate limiter with In-Memory,
    # allowing 60 requests per minute, using the token bucket algorithm.
    # Default: In-Memory storage, Token Bucket algorithm, 60 reqs / min.
    throttle = Throttled()

    # Consume 1 token.
    result = await throttle.limit("key")
    # Should not be limited.
    assert not result.limited

    # Get the state of the rate limiter:
    # >> RateLimitState(limit=60, remaining=59, reset_after=1, retry_after=0)
    print(result.state)

    # You can also get the state by using the `peek` method (no deduction).
    # >> RateLimitState(limit=60, remaining=59, reset_after=1, retry_after=0)
    print(await throttle.peek("key"))

    # You can also specify the cost of the request.
    result = await throttle.limit("key", cost=60)
    # This will consume 60 tokens, which exceeds the limit of 60 tokens per minute.
    assert result.limited

    # The limited result still carries a state snapshot:
    # >> RateLimitState(limit=60, remaining=59, reset_after=1, retry_after=1)
    print(result.state)


if __name__ == "__main__":
    asyncio.run(main())
36 |
--------------------------------------------------------------------------------
/examples/quickstart/async/memory_example.py:
--------------------------------------------------------------------------------
import asyncio

from throttled.asyncio import Throttled, rate_limiter, store

# 🌟 A single MemoryStore shared by both decorated functions, so ``ping``
# and ``pong`` deduct from the same "ping-pong" key.
shared_store = store.MemoryStore()


@Throttled(key="ping-pong", quota=rate_limiter.per_min(1), store=shared_store)
async def ping() -> str:
    return "ping"


@Throttled(key="ping-pong", quota=rate_limiter.per_min(1), store=shared_store)
async def pong() -> str:
    return "pong"


async def demo():
    # The first call is allowed and consumes the quota's only request.
    await ping()
    # The second call hits the same key in the same store, so it raises:
    # >> throttled.exceptions.LimitedError:
    #    Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
    await pong()


if __name__ == "__main__":
    asyncio.run(demo())
29 |
--------------------------------------------------------------------------------
/examples/quickstart/async/quickstart_example.py:
--------------------------------------------------------------------------------
import asyncio

from throttled.asyncio import RateLimiterType, Throttled, rate_limiter, store, utils

# 📈 Token Bucket algorithm, 🪣 1,000 tokens per second (limit) with a bucket
# size of 1,000 (burst), 📁 backed by In-Memory storage.
throttle = Throttled(
    using=RateLimiterType.TOKEN_BUCKET.value,
    quota=rate_limiter.per_sec(1_000, burst=1_000),
    store=store.MemoryStore(),
)


async def call_api() -> bool:
    # 💧 Deduct 1 token for key="/ping"; True means the request was denied.
    return (await throttle.limit("/ping", cost=1)).limited


async def main():
    bench: utils.Benchmark = utils.Benchmark()
    denied_num: int = sum(await bench.async_serial(call_api, 100_000))
    print(f"❌ Denied: {denied_num} requests")


if __name__ == "__main__":
    # 💻 Python 3.12.10, Linux 5.4.119-1-tlinux4-0009.1, Arch: x86_64, Specs: 2C4G.
    # ✅ Total: 100000, 🕒 Latency: 0.0068 ms/op, 🚀 Throughput: 122513 req/s (--)
    # ❌ Denied: 98000 requests
    asyncio.run(main())
32 |
--------------------------------------------------------------------------------
/examples/quickstart/async/redis_example.py:
--------------------------------------------------------------------------------
import asyncio

from throttled.asyncio import RateLimiterType, Throttled, rate_limiter, store

# 🌟 use RedisStore as storage
redis_store = store.RedisStore(
    server="redis://127.0.0.1:6379/0", options={"PASSWORD": ""}
)


@Throttled(
    key="/api/products",
    using=RateLimiterType.TOKEN_BUCKET.value,
    quota=rate_limiter.per_min(1),
    store=redis_store,
)
async def products() -> list:
    return [{"name": "iPhone"}, {"name": "MacBook"}]


async def demo():
    # The first call fits the 1 req/min quota.
    await products()
    # The second call exceeds it and raises:
    # >> throttled.exceptions.LimitedError:
    #    Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
    await products()


if __name__ == "__main__":
    asyncio.run(demo())
26 |
--------------------------------------------------------------------------------
/examples/quickstart/async/using_algorithm_example.py:
--------------------------------------------------------------------------------
"""Async quickstart: select the rate-limiting algorithm via ``using=``."""
import asyncio

from throttled.asyncio import RateLimiterType, Throttled, rate_limiter


async def main():
    throttle = Throttled(
        # 🌟 Specifying a rate-limiting algorithm; uncomment one alternative below.
        using=RateLimiterType.FIXED_WINDOW.value,
        # using=RateLimiterType.SLIDING_WINDOW.value,
        # using=RateLimiterType.LEAKING_BUCKET.value,
        # using=RateLimiterType.TOKEN_BUCKET.value,
        # using=RateLimiterType.GCRA.value,
        quota=rate_limiter.per_min(1),
    )
    # Cost 2 against a quota of 1 → the request must be rejected.
    assert (await throttle.limit("key", 2)).limited


if __name__ == "__main__":
    asyncio.run(main())
21 |
--------------------------------------------------------------------------------
/examples/quickstart/async/wait_retry_concurrent_example.py:
--------------------------------------------------------------------------------
"""Async quickstart: wait-retry under concurrency smooths throughput to the quota."""
import asyncio

from throttled.asyncio import RateLimiterType, Throttled, rate_limiter, utils

throttle = Throttled(
    using=RateLimiterType.GCRA.value,
    quota=rate_limiter.per_sec(100, burst=100),
    # ⏳ Set timeout to 1 second, which allows waiting for retry,
    # and returns the last RateLimitResult if the wait exceeds 1 second.
    timeout=1,
)


async def call_api() -> bool:
    """Issue one request; True means it was ultimately rejected after waiting."""
    # ⬆️⏳ Function call with timeout will override the global timeout.
    result = await throttle.limit("/ping", cost=1, timeout=1)
    return result.limited


async def main():
    benchmark: utils.Benchmark = utils.Benchmark()
    denied_num: int = sum(await benchmark.async_concurrent(call_api, 1_000, workers=4))
    print(f"❌ Denied: {denied_num} requests")


if __name__ == "__main__":
    # Reference numbers from a past run:
    # 👇 The actual QPS is close to the preset quota (100 req/s):
    # ✅ Total: 1000, 🕒 Latency: 35.8103 ms/op, 🚀 Throughput: 111 req/s (--)
    # ❌ Denied: 8 requests
    asyncio.run(main())
31 |
--------------------------------------------------------------------------------
/examples/quickstart/async/wait_retry_example.py:
--------------------------------------------------------------------------------
"""Async quickstart: a decorator-level timeout makes calls wait instead of failing."""
import asyncio
import time

from throttled.asyncio import RateLimiterType, Throttled, rate_limiter


@Throttled(
    key="ping",
    using=RateLimiterType.GCRA.value,
    quota=rate_limiter.per_sec(2, burst=2),
    # ⏳ Set timeout to 0.5 second, which allows waiting for retry,
    # and returns the last RateLimitResult if the wait exceeds 0.5 second.
    timeout=0.5,
)
async def ping() -> str:
    return "pong"


async def main():
    # Make 5 sequential requests; beyond the burst of 2, each call waits for a token.
    start_time = time.time()
    for i in range(5):
        await ping()
        print(f"Request {i + 1} completed at {time.time() - start_time:.2f}s")

    print(f"\nTotal time for 5 requests at 2/sec: {time.time() - start_time:.2f}s")


if __name__ == "__main__":
    asyncio.run(main())
31 |
--------------------------------------------------------------------------------
/examples/quickstart/async/wait_retry_function_call_example.py:
--------------------------------------------------------------------------------
"""Async quickstart: per-call ``timeout=`` enables wait-and-retry on ``limit()``."""
import asyncio
import time

from throttled.asyncio import Throttled, per_sec, utils


async def main():
    """Demonstrate waiting for a token versus bailing out when the wait is too long."""
    # Allow 1 burst request, producing 1 token per second.
    throttle = Throttled(key="key", quota=per_sec(1, burst=1))

    # Consume burst request quota.
    assert not (await throttle.limit()).limited

    timer = utils.Timer(
        clock=time.time,
        callback=lambda elapsed, start, end: print(f"elapsed: {elapsed:.2f} seconds"),
    )
    async with timer:
        # Enabled wait-retry, which will wait for the next available token
        # if the limit is reached.
        # > elapsed: 1.00 seconds
        assert not (await throttle.limit(timeout=1)).limited

    # Use the async context-manager form consistently inside a coroutine
    # (the original mixed ``with`` and ``async with`` on the same timer).
    async with timer:
        # If the timeout is exceeded, it will return the last RateLimitResult.
        # timeout < ``RateLimitResult.retry_after``, return immediately.
        # > elapsed: 0 seconds
        assert (await throttle.limit(timeout=0.5)).limited


if __name__ == "__main__":
    asyncio.run(main())
33 |
--------------------------------------------------------------------------------
/examples/quickstart/context_manager_example.py:
--------------------------------------------------------------------------------
"""Quickstart: use ``Throttled`` as a context manager around a protected call."""
from throttled import Throttled, exceptions, rate_limiter


def call_api():
    # Placeholder for the real work guarded by the rate limiter.
    print("doing something...")


def main():
    """First pass succeeds; the second exceeds 1 req/min and raises LimitedError."""
    throttle: Throttled = Throttled(key="/api/v1/users/", quota=rate_limiter.per_min(1))
    with throttle as result:
        # The first call will not be rate limited.
        assert not result.limited
        # Get the state of the rate limiter:
        # >> RateLimitState(limit=1, remaining=0, reset_after=60, retry_after=0)
        print(result.state)

        call_api()

    try:
        with throttle:
            call_api()
    except exceptions.LimitedError as exc:
        # >> Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
        print(exc)


if __name__ == "__main__":
    main()
29 |
--------------------------------------------------------------------------------
/examples/quickstart/decorator_example.py:
--------------------------------------------------------------------------------
"""Quickstart: decorator usage, including a per-call ``cost`` greater than one."""
from throttled import Throttled, exceptions, rate_limiter

quota = rate_limiter.per_min(2)


# Create a rate limiter that allows 2 requests per minute.
@Throttled(key="/ping", quota=quota)
def ping() -> str:
    return "pong"


# Shares the same key/quota, but consumes 2 tokens per call.
@Throttled(key="/ping", quota=quota, cost=2)
def heavy_ping() -> str:
    return "heavy_pong"


def main():
    # The first call will not be rate limited.
    # >> pong
    print(ping())

    try:
        # The second call will be rate limited, because heavy_ping consumes 2 Tokens
        # and 1 Token has been consumed by the first call.
        heavy_ping()
    except exceptions.LimitedError as exc:
        # >> Rate limit exceeded: remaining=1, reset_after=30, retry_after=60.
        print(exc)


if __name__ == "__main__":
    main()
34 |
--------------------------------------------------------------------------------
/examples/quickstart/function_call_example.py:
--------------------------------------------------------------------------------
"""Quickstart: plain function-call style — ``limit()`` and ``peek()``."""
from throttled import Throttled


def main():
    # By Default, it initializes a rate limiter with In-Memory,
    # allowing 60 requests per minute, using the token bucket algorithm.
    # Default: In-Memory storage, Token Bucket algorithm, 60 reqs / min.
    throttle = Throttled()

    # Consume 1 token.
    result = throttle.limit("key")
    # Should not be limited.
    assert not result.limited

    # Get the state of the rate limiter:
    # >> RateLimitState(limit=60, remaining=59, reset_after=1, retry_after=0))
    print(result.state)

    # You can also get the state by using the `peek` method
    # (read-only: it does not consume tokens).
    # >> RateLimitState(limit=60, remaining=59, reset_after=1, retry_after=0)
    print(throttle.peek("key"))

    # You can also specify the cost of the request.
    result = throttle.limit("key", cost=60)
    # This will consume 60 tokens, which exceeds the limit of 60 tokens per minute.
    assert result.limited

    # >> RateLimitState(limit=60, remaining=59, reset_after=1, retry_after=1))
    print(result.state)


if __name__ == "__main__":
    main()
34 |
--------------------------------------------------------------------------------
/examples/quickstart/memory_example.py:
--------------------------------------------------------------------------------
"""Quickstart: share one MemoryStore so two functions draw from the same quota."""
from throttled import Throttled, rate_limiter, store

# 🌟 Use MemoryStore as the storage backend.
# Both decorators below share this instance AND the same key, so they
# consume one common 1 req/min budget.
mem_store = store.MemoryStore()


@Throttled(key="ping-pong", quota=rate_limiter.per_min(1), store=mem_store)
def ping() -> str:
    return "ping"


@Throttled(key="ping-pong", quota=rate_limiter.per_min(1), store=mem_store)
def pong() -> str:
    return "pong"


def demo():
    # >> ping
    ping()
    # The shared quota is exhausted, so the second function raises:
    # >> throttled.exceptions.LimitedError:
    # Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
    pong()


if __name__ == "__main__":
    demo()
27 |
--------------------------------------------------------------------------------
/examples/quickstart/quickstart_example.py:
--------------------------------------------------------------------------------
"""Quickstart: serial benchmark of the token-bucket limiter on in-memory storage."""
from throttled import RateLimiterType, Throttled, rate_limiter, store, utils

throttle = Throttled(
    # 📈 Use Token Bucket algorithm.
    using=RateLimiterType.TOKEN_BUCKET.value,
    # 🪣 Set quota: 1,000 tokens per second (limit), bucket size 1,000 (burst).
    quota=rate_limiter.per_sec(1_000, burst=1_000),
    # 📁 Use In-Memory storage.
    store=store.MemoryStore(),
)


def call_api() -> bool:
    """Issue one request; True means it was rejected."""
    # 💧 Deduct 1 token for key="/ping"
    result = throttle.limit("/ping", cost=1)
    return result.limited


if __name__ == "__main__":
    # Reference numbers from a past run:
    # 💻 Python 3.12.10, Linux 5.4.119-1-tlinux4-0009.1, Arch: x86_64, Specs: 2C4G.
    # ✅ Total: 100000, 🕒 Latency: 0.0068 ms/op, 🚀 Throughput: 122513 req/s (--)
    # ❌ Denied: 98000 requests
    benchmark: utils.Benchmark = utils.Benchmark()
    denied_num: int = sum(benchmark.serial(call_api, 100_000))
    print(f"❌ Denied: {denied_num} requests")
26 |
--------------------------------------------------------------------------------
/examples/quickstart/quota_example.py:
--------------------------------------------------------------------------------
"""Quickstart: the quota factory helpers and their ``burst`` parameter."""
from datetime import timedelta

from throttled import rate_limiter

# Each helper returns a quota for the given period.
rate_limiter.per_sec(60)  # 60 req/sec
rate_limiter.per_min(60)  # 60 req/min
rate_limiter.per_hour(60)  # 60 req/hour
rate_limiter.per_day(60)  # 60 req/day
rate_limiter.per_week(60)  # 60 req/week

# Allow a burst of up to 120 requests.
# When ``burst`` is not specified, it defaults to the ``limit`` value.
rate_limiter.per_min(60, burst=120)

# Allow 120 requests in total over two minutes, with bursts of up to 150.
rate_limiter.per_duration(timedelta(minutes=2), limit=120, burst=150)
17 |
--------------------------------------------------------------------------------
/examples/quickstart/redis_example.py:
--------------------------------------------------------------------------------
"""Quickstart: back the rate limiter with Redis instead of process memory."""
from throttled import RateLimiterType, Throttled, rate_limiter, store


@Throttled(
    key="/api/products",
    using=RateLimiterType.TOKEN_BUCKET.value,
    quota=rate_limiter.per_min(1),
    # 🌟 use RedisStore as storage
    store=store.RedisStore(server="redis://127.0.0.1:6379/0", options={"PASSWORD": ""}),
)
def products() -> list:
    return [{"name": "iPhone"}, {"name": "MacBook"}]


def demo():
    # First call fits within the 1 req/min quota.
    products()
    # Second call exceeds the quota and the decorator raises:
    # >> throttled.exceptions.LimitedError:
    # Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
    products()


if __name__ == "__main__":
    demo()
24 |
--------------------------------------------------------------------------------
/examples/quickstart/using_algorithm_example.py:
--------------------------------------------------------------------------------
"""Quickstart: select the rate-limiting algorithm via ``using=``."""
from throttled import RateLimiterType, Throttled, rate_limiter


def main():
    throttle = Throttled(
        # 🌟 Specifying a rate-limiting algorithm; uncomment one alternative below.
        using=RateLimiterType.FIXED_WINDOW.value,
        # using=RateLimiterType.SLIDING_WINDOW.value,
        # using=RateLimiterType.LEAKING_BUCKET.value,
        # using=RateLimiterType.TOKEN_BUCKET.value,
        # using=RateLimiterType.GCRA.value,
        quota=rate_limiter.per_min(1),
    )
    # Cost 2 against a quota of 1 → the request must be rejected.
    assert throttle.limit("key", 2).limited


if __name__ == "__main__":
    main()
19 |
--------------------------------------------------------------------------------
/examples/quickstart/wait_retry_concurrent_example.py:
--------------------------------------------------------------------------------
"""Quickstart: wait-retry under concurrency smooths throughput to the quota."""
from throttled import RateLimiterType, Throttled, rate_limiter, utils

throttle = Throttled(
    using=RateLimiterType.GCRA.value,
    quota=rate_limiter.per_sec(100, burst=100),
    # ⏳ Set timeout to 1 second, which allows waiting for retry,
    # and returns the last RateLimitResult if the wait exceeds 1 second.
    timeout=1,
)


def call_api() -> bool:
    """Issue one request; True means it was ultimately rejected after waiting."""
    # ⬆️⏳ Function call with timeout will override the global timeout.
    result = throttle.limit("/ping", cost=1, timeout=1)
    return result.limited


if __name__ == "__main__":
    # Reference numbers from a past run:
    # 👇 The actual QPS is close to the preset quota (100 req/s):
    # ✅ Total: 1000, 🕒 Latency: 35.8103 ms/op, 🚀 Throughput: 111 req/s (--)
    # ❌ Denied: 8 requests
    benchmark: utils.Benchmark = utils.Benchmark()
    denied_num: int = sum(benchmark.concurrent(call_api, 1_000, workers=4))
    print(f"❌ Denied: {denied_num} requests")
25 |
--------------------------------------------------------------------------------
/examples/quickstart/wait_retry_example.py:
--------------------------------------------------------------------------------
"""Quickstart: a decorator-level timeout makes calls wait instead of failing."""
import time

from throttled import RateLimiterType, Throttled, rate_limiter


@Throttled(
    key="ping",
    using=RateLimiterType.GCRA.value,
    quota=rate_limiter.per_sec(2, burst=2),
    # ⏳ Set timeout to 0.5 second, which allows waiting for retry,
    # and returns the last RateLimitResult if the wait exceeds 0.5 second.
    timeout=0.5,
)
def ping() -> str:
    return "pong"


def main():
    # Make 5 sequential requests; beyond the burst of 2, each call waits for a token.
    start_time = time.time()
    for i in range(5):
        ping()
        print(f"Request {i + 1} completed at {time.time() - start_time:.2f}s")

    total_time = time.time() - start_time
    print(f"\nTotal time for 5 requests at 2/sec: {total_time:.2f}s")


if __name__ == "__main__":
    main()
31 |
--------------------------------------------------------------------------------
/examples/quickstart/wait_retry_function_call_example.py:
--------------------------------------------------------------------------------
"""Quickstart: per-call ``timeout=`` enables wait-and-retry on ``limit()``."""
import time

from throttled import Throttled, per_sec, utils


def main():
    """Demonstrate waiting for a token versus bailing out when the wait is too long."""
    # Allow 1 burst request, producing 1 token per second.
    throttle = Throttled(key="key", quota=per_sec(1, burst=1))

    # Consume burst request quota.
    assert not throttle.limit().limited

    timer = utils.Timer(
        clock=time.time,
        callback=lambda elapsed, start, end: print(f"elapsed: {elapsed:.2f} seconds"),
    )
    with timer:
        # Enabled wait-retry, which will wait for the next available token
        # if the limit is reached.
        # > elapsed: 1.00 seconds
        assert not throttle.limit(timeout=1).limited

    with timer:
        # If the timeout is exceeded, it will return the last RateLimitResult.
        # timeout < ``RateLimitResult.retry_after``, return immediately.
        # > elapsed: 0 seconds
        assert throttle.limit(timeout=0.5).limited


if __name__ == "__main__":
    main()
32 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[tool.poetry]
name = "throttled-py"
version = "2.2.3"
description = "🔧 High-performance Python rate limiting library with multiple algorithms (Fixed Window, Sliding Window, Token Bucket, Leaky Bucket & GCRA) and storage backends (Redis, In-Memory)."
authors = ["ZhuoZhuoCrayon "]
license = "MIT"
readme = "README.md"
packages = [
    { include = "throttled" },
]

homepage = "https://github.com/ZhuoZhuoCrayon/throttled-py"
repository = "https://github.com/ZhuoZhuoCrayon/throttled-py"
documentation = "https://throttled-py.readthedocs.io"

[tool.poetry.urls]
Changes = "https://github.com/ZhuoZhuoCrayon/throttled-py/releases"
"Bug Tracker" = "https://github.com/ZhuoZhuoCrayon/throttled-py/issues"

[tool.poetry.dependencies]
python = ">=3.8,<4.0"
# redis is an optional extra: the in-memory backend works without it.
redis = { version = "^5.2.1", optional = true }

[tool.poetry.extras]
in-memory = []
redis = ["redis"]
all = ["redis"]

[tool.poetry.group.dev.dependencies]
poetry-setup = "^0.3.6"
twine = "^6.0.1"
# Version forks below keep dev tooling installable on Python 3.8 while
# allowing newer releases on 3.9+.
ipython = [
    { version = "^7.34.0", python = "~3.8 || ~3.9" },
    { version = "^8", python = "^3.10" }
]
pre-commit = "^3.4.0"
pytest = "^8.3.4"
pytest-cov = [
    {version = "^2", python = "~3.8"},
    {version = "^6", python = "^3.9"}
]
pytest-asyncio = [
    {version = "~0.24.0", python = "~3.8"},
    {version = "^0.26.0", python = "^3.9"}
]
fakeredis = {extras = ["lua"], version = "^2.26.2"}

# NOTE(review): a primary mirror source pins all installs to this mirror;
# confirm this is intended for public contributors outside that network.
[[tool.poetry.source]]
priority = "primary"
name = "tencent-mirror"
url = "https://mirrors.tencent.com/pypi/simple/"

[build-system]
requires = [
    "poetry-core>=1.0.0; python_version != '3.8'",
    "poetry-core>=1.0.0,<2; python_version == '3.8'"
]
build-backend = "poetry.core.masonry.api"

# NOTE(review): plain flake8 does not read pyproject.toml; this section is
# only effective with a plugin such as Flake8-pyproject — confirm one is used.
[tool.flake8]
python_requires = ">= 3.8"
ignore = "F405,W503,E203,TYP001"
max-line-length = 89
max-complexity = 26
format = "pylint"
show_source = "false"
statistics = "true"
count = "true"
exclude = [
    "migrations",
    "*.pyc",
    ".git",
    "__pycache__",
    "static",
    "node_modules",
    "templates",
    "bin",
    "config",
    "scripts",
    "script",
]
#per-file-ignores = """
#    xxx.py: E501
#"""

[tool.black]
line-length = 89
include = '\.pyi?$'
exclude = '''
/(
    \.git
  | \.hg
  | \.mypy_cache
  | \.tox
  | \.venv
  | _build
  | buck-out
  | build
  | dist
  | migrations
)/
'''

[tool.isort]
# refer: https://pycqa.github.io/isort/docs/configuration/options.html
profile = "black"
line_length = 89
skip_glob = ["*/migrations/*", "*/node_modules/*"]

[tool.pytest.ini_options]
asyncio_default_fixture_loop_scope = "function"
112 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/__init__.py
--------------------------------------------------------------------------------
/tests/asyncio/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/asyncio/__init__.py
--------------------------------------------------------------------------------
/tests/asyncio/benchmarks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/asyncio/benchmarks/__init__.py
--------------------------------------------------------------------------------
/tests/asyncio/benchmarks/test_throttled.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Dict, List
3 |
4 | import pytest
5 | import pytest_asyncio
6 | from redis.asyncio import Redis
7 |
8 | from throttled.asyncio import (
9 | BaseStore,
10 | MemoryStore,
11 | Quota,
12 | RateLimiterType,
13 | RedisStore,
14 | Throttled,
15 | constants,
16 | per_sec,
17 | types,
18 | utils,
19 | )
20 |
# Live Redis instance used by the benchmarks (not the fakeredis test double).
REDIS_URL: str = "redis://127.0.0.1:6379/0"

# Concurrency level for the concurrent benchmark cases.
WORKERS: int = 8
24 |
25 |
async def clear_redis(client: Redis) -> None:
    """Delete every ``throttled*`` key so benchmark runs start from a clean slate."""
    keys: List[str] = await client.keys("throttled*")
    if keys:
        # Redis ``DEL`` requires at least one key; redis-py raises when
        # ``delete()`` is called with no arguments, so skip the empty case.
        await client.delete(*keys)
29 |
30 |
async def redis_baseline(client: Redis):
    """Baseline workload: a single Redis INCRBY per call, no rate-limiting logic."""
    await client.incrby("throttled:v2", 1)
33 |
34 |
async def memory_baseline(dict_store: Dict[str, int]):
    """Baseline workload: one unguarded dictionary counter increment per call."""
    counter_key = "throttled:v2"
    current = dict_store.get(counter_key, 0)
    dict_store[counter_key] = current + 1
37 |
38 |
async def memory_with_lock_baseline(lock: asyncio.Lock, dict_store: Dict[str, int]):
    """Baseline workload: the dictionary increment guarded by an asyncio lock."""
    async with lock:
        await memory_baseline(dict_store)
42 |
43 |
async def call_api(throttle: Throttled) -> bool:
    """Issue one limited request against ``throttle``; True means it was rejected."""
    return (await throttle.limit("/ping", cost=1)).limited
46 |
47 |
@pytest_asyncio.fixture(params=constants.StoreType.choice())
async def store(request) -> BaseStore:
    """Yield a store backend per configured store type; clean up Redis keys after."""

    def _create_store(store_type: str) -> BaseStore:
        if store_type == constants.StoreType.MEMORY.value:
            return MemoryStore()
        else:
            return RedisStore(server=REDIS_URL)

    store: BaseStore = _create_store(request.param)

    yield store

    if request.param == constants.StoreType.REDIS.value:
        # ``clear_redis`` is a coroutine — without ``await`` (and an async
        # fixture) it was never executed, leaking keys between sessions.
        # This now mirrors the async teardown in tests/asyncio/conftest.py.
        await clear_redis(store._backend.get_client())
62 |
63 |
@pytest_asyncio.fixture
async def redis_client() -> Redis:
    """Yield a fresh Redis client; throttled keys are flushed on teardown."""
    client: Redis = Redis.from_url(REDIS_URL)

    yield client

    await clear_redis(client)
71 |
72 |
# Benchmarks are opt-in: remove the skip mark to run them against a live Redis.
@pytest.mark.skip(reason="skip benchmarks")
@pytest.mark.asyncio
class TestBenchmarkThrottled:
    """Benchmarks comparing raw-store baselines against the full limiter path."""

    async def test_memory_baseline__serial(self, benchmark: utils.Benchmark):
        """Serial upper bound: bare dict increments with no limiter involved."""
        await benchmark.async_serial(memory_baseline, batch=500_000, dict_store={})

    async def test_memory_baseline__concurrent(self, benchmark: utils.Benchmark):
        """Concurrent upper bound: dict increments serialized by an asyncio lock."""
        await benchmark.async_concurrent(
            memory_with_lock_baseline,
            batch=100_000,
            workers=WORKERS,
            lock=asyncio.Lock(),
            dict_store={},
        )

    async def test_redis_baseline__serial(
        self, benchmark: utils.Benchmark, redis_client: Redis
    ):
        """Serial upper bound: one INCRBY round-trip per call."""
        await benchmark.async_serial(redis_baseline, batch=100_000, client=redis_client)

    async def test_redis_baseline__concurrent(
        self, benchmark: utils.Benchmark, redis_client: Redis
    ):
        """Concurrent upper bound: INCRBY round-trips from several workers."""
        await benchmark.async_concurrent(
            redis_baseline, batch=100_000, workers=WORKERS, client=redis_client
        )

    @pytest.mark.parametrize("using", RateLimiterType.choice())
    @pytest.mark.parametrize("quota", [per_sec(1_000)])
    async def test_limit__serial(
        self,
        benchmark: utils.Benchmark,
        store: BaseStore,
        using: types.RateLimiterTypeT,
        quota: Quota,
    ):
        """Full limiter path, serial, across every algorithm and store backend."""
        throttle = Throttled(using=using, quota=quota, store=store)
        await benchmark.async_serial(call_api, batch=100_000, throttle=throttle)

    @pytest.mark.parametrize("using", RateLimiterType.choice())
    @pytest.mark.parametrize("quota", [per_sec(1_000)])
    async def test_limit__concurrent(
        self,
        benchmark: utils.Benchmark,
        store: BaseStore,
        using: types.RateLimiterTypeT,
        quota: Quota,
    ):
        """Full limiter path, concurrent, across every algorithm and store backend."""
        throttle = Throttled(using=using, quota=quota, store=store)
        await benchmark.async_concurrent(
            call_api, batch=100_000, workers=WORKERS, throttle=throttle
        )
125 |
--------------------------------------------------------------------------------
/tests/asyncio/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest_asyncio
2 | from fakeredis.aioredis import FakeConnection
3 |
4 | from throttled.asyncio import BaseStore, MemoryStore, RedisStore, constants
5 |
6 |
@pytest_asyncio.fixture(
    params=[constants.StoreType.MEMORY.value, constants.StoreType.REDIS.value]
)
async def store(request) -> BaseStore:
    """Yield a store backend per store type, using fakeredis instead of live Redis."""

    def _create_store(store_type: str) -> BaseStore:
        if store_type == constants.StoreType.MEMORY.value:
            return MemoryStore()
        else:
            # Swap the real client class for fakeredis so no server is needed.
            return RedisStore(
                options={
                    "REDIS_CLIENT_CLASS": "fakeredis.aioredis.FakeRedis",
                    "CONNECTION_POOL_KWARGS": {"connection_class": FakeConnection},
                }
            )

    store: BaseStore = _create_store(request.param)

    yield store

    if request.param == constants.StoreType.REDIS.value:
        # Flush the fake Redis so state never leaks between tests.
        await store._backend.get_client().flushall()
28 |
--------------------------------------------------------------------------------
/tests/asyncio/rate_limiter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/asyncio/rate_limiter/__init__.py
--------------------------------------------------------------------------------
/tests/asyncio/rate_limiter/test_fixed_window.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 | from typing import Any, Callable, Generator, List
3 |
4 | import pytest
5 |
6 | from tests.rate_limiter import parametrizes
7 | from tests.rate_limiter.test_fixed_window import assert_rate_limit_result
8 | from throttled.asyncio import (
9 | BaseRateLimiter,
10 | BaseStore,
11 | Quota,
12 | Rate,
13 | RateLimiterRegistry,
14 | RateLimiterType,
15 | RateLimitResult,
16 | RateLimitState,
17 | per_min,
18 | )
19 | from throttled.utils import Benchmark, now_sec
20 |
21 |
@pytest.fixture
def rate_limiter_constructor(
    store: BaseStore,
) -> Generator[Callable[[Quota], BaseRateLimiter], Any, None]:
    """Yield a factory that builds a fixed-window limiter bound to the store fixture."""

    def _create_rate_limiter(quota: Quota) -> BaseRateLimiter:
        return RateLimiterRegistry.get(RateLimiterType.FIXED_WINDOW.value)(quota, store)

    yield _create_rate_limiter
30 |
31 |
@pytest.mark.asyncio
class TestFixedWindowRateLimiter:
    """Async fixed-window limiter tests mirroring the sync suite's cases."""

    async def test_limit(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Walk the shared table cases and check both results and the stored count."""
        limit: int = 5
        period: int = 60
        quota: Quota = Quota(Rate(period=timedelta(minutes=1), limit=limit))

        key: str = "key"
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        # The window key encodes the current period index; nothing exists yet.
        store_key: str = f"throttled:v1:fixed_window:key:period:{now_sec() // period}"
        assert await rate_limiter._store.exists(store_key) is False

        for case in parametrizes.FIXED_WINDOW_LIMIT_CASES:
            result: RateLimitResult = await rate_limiter.limit(key, cost=case["cost"])
            assert_rate_limit_result(case["limited"], case["remaining"], quota, result)
            assert await rate_limiter._store.get(store_key) == case["count"]

    @pytest.mark.parametrize(
        "quota", [per_min(1), per_min(10), per_min(100), per_min(1_000)]
    )
    @pytest.mark.parametrize("requests_num", [10, 100, 1_000, 10_000])
    async def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Under concurrency, admitted requests stay within window-boundary bounds."""
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        async def _task():
            result = await rate_limiter.limit("key")
            return result.limited

        results: List[bool] = await benchmark.async_concurrent(
            task=_task, batch=requests_num
        )

        accessed_num: int = requests_num - sum(results)
        limit: int = min(requests_num, quota.get_limit())
        # Period boundaries may burst with 2 times the number of requests.
        assert limit <= accessed_num <= 2 * limit

    async def test_peek(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """``peek`` reports remaining quota without consuming it."""
        key: str = "key"
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(per_min(1))

        def _assert(_state: RateLimitState):
            assert _state.limit == 1
            # reset_after should track the time left in the current minute window.
            assert _state.reset_after - (60 - (now_sec() % 60)) <= 1

        state: RateLimitState = await rate_limiter.peek(key)
        _assert(state)
        assert state.remaining == 1

        await rate_limiter.limit(key)

        state: RateLimitState = await rate_limiter.peek(key)
        assert state.remaining == 0
        _assert(state)
--------------------------------------------------------------------------------
/tests/asyncio/rate_limiter/test_gcra.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Callable, List
3 |
4 | import pytest
5 |
6 | from throttled.asyncio import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | RateLimiterRegistry,
11 | RateLimiterType,
12 | RateLimitResult,
13 | RateLimitState,
14 | per_min,
15 | types,
16 | utils,
17 | )
18 |
19 | from ...rate_limiter import parametrizes
20 | from ...rate_limiter.test_gcra import assert_rate_limit_result
21 |
22 |
@pytest.fixture
def rate_limiter_constructor(store: BaseStore) -> Callable[[Quota], BaseRateLimiter]:
    """Return a factory that builds a GCRA limiter bound to the store fixture."""

    def _create_rate_limiter(quota: Quota) -> BaseRateLimiter:
        return RateLimiterRegistry.get(RateLimiterType.GCRA.value)(quota, store)

    return _create_rate_limiter
29 |
30 |
@pytest.mark.asyncio
class TestGCRARateLimiter:
    """Async GCRA limiter tests mirroring the sync suite's cases."""

    async def test_limit(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Walk the shared table cases, honoring their inter-request sleeps."""
        quota: Quota = per_min(limit=60, burst=10)
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        for case in parametrizes.GCRA_LIMIT_CASES:
            if "sleep" in case:
                await asyncio.sleep(case["sleep"])

            result: RateLimitResult = await rate_limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case["limited"], case["remaining"], quota, result)

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    async def test_limit__concurrent(
        self,
        benchmark: utils.Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Admitted requests stay within burst plus tokens replenished while running."""

        def _callback(elapsed: types.TimeLikeValueT, *args, **kwargs):
            accessed_num: int = requests_num - sum(results)
            limit: int = min(requests_num, quota.get_limit())
            rate: float = quota.get_limit() / quota.get_period_sec()
            # The "+ 2" slack absorbs timing jitter around the measured window.
            assert limit <= accessed_num <= limit + (elapsed + 2) * rate

        async def _task():
            result = await rate_limiter.limit("key")
            return result.limited

        async with utils.Timer(callback=_callback):
            rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            results: List[bool] = await benchmark.async_concurrent(
                task=_task, batch=requests_num
            )

    async def test_peek(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """``peek`` tracks token depletion and gradual replenishment over time."""
        key: str = "key"
        quota: Quota = per_min(limit=60, burst=10)
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        state: RateLimitState = await rate_limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=10, reset_after=0)

        await rate_limiter.limit(key, cost=5)
        state: RateLimitState = await rate_limiter.peek(key)
        assert state.limit == 10 and state.remaining == 5
        assert 5 - state.reset_after < 0.1

        # At 1 token/sec, one second restores one unit of quota.
        await asyncio.sleep(1)
        state: RateLimitState = await rate_limiter.peek(key)
        assert state.limit == 10 and state.remaining == 6
        assert 4 - state.reset_after < 0.1

        await rate_limiter.limit(key, cost=6)
        state: RateLimitState = await rate_limiter.peek(key)
        assert state.remaining == 0
        assert 10 - state.reset_after < 0.1
94 |
--------------------------------------------------------------------------------
/tests/asyncio/rate_limiter/test_leaking_bucket.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Callable, List
3 |
4 | import pytest
5 |
6 | from throttled.asyncio import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | RateLimiterRegistry,
11 | RateLimitResult,
12 | RateLimitState,
13 | constants,
14 | per_min,
15 | types,
16 | utils,
17 | )
18 |
19 | from ...rate_limiter import parametrizes
20 | from ...rate_limiter.test_leaking_bucket import assert_rate_limit_result
21 |
22 |
@pytest.fixture
def rate_limiter_constructor(store: BaseStore) -> Callable[[Quota], BaseRateLimiter]:
    """Return a factory building leaking-bucket limiters over ``store``."""

    def _factory(quota: Quota) -> BaseRateLimiter:
        limiter_cls = RateLimiterRegistry.get(
            constants.RateLimiterType.LEAKING_BUCKET.value
        )
        return limiter_cls(quota, store)

    return _factory
31 |
32 |
@pytest.mark.asyncio
class TestLeakingBucketRateLimiter:
    """Async tests for the leaking-bucket algorithm."""

    async def test_limit(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Replay the shared leaking-bucket scenario table sequentially."""
        quota: Quota = per_min(limit=60, burst=10)
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        for case in parametrizes.LEAKING_BUCKET_LIMIT_CASES:
            # Some steps pause so the bucket can drain before the next call.
            if "sleep" in case:
                await asyncio.sleep(case["sleep"])

            result: RateLimitResult = await rate_limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case, quota, result)

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    async def test_limit__concurrent(
        self,
        benchmark: utils.Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Concurrent admissions stay within the limit plus drained volume."""

        def _callback(elapsed: types.TimeLikeValueT, *args, **kwargs):
            accessed_num: int = requests_num - sum(results)
            limit: int = min(requests_num, quota.get_limit())
            rate: float = quota.get_limit() / quota.get_period_sec()
            # Generous slack (+5s) for scheduling jitter while the batch runs.
            assert limit <= accessed_num <= limit + (elapsed + 5) * rate

        async def _task():
            result = await rate_limiter.limit("key")
            return result.limited

        # CONSISTENCY FIX: use the async context manager, matching the other
        # asyncio suites (GCRA / token bucket use ``async with utils.Timer``).
        async with utils.Timer(callback=_callback):
            rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            results: List[bool] = await benchmark.async_concurrent(
                task=_task, batch=requests_num
            )

    async def test_peek(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Peek reports bucket state without adding new requests."""
        key: str = "key"
        quota: Quota = per_min(limit=60, burst=10)
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        state: RateLimitState = await rate_limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=10, reset_after=0)

        await rate_limiter.limit(key, cost=5)
        state: RateLimitState = await rate_limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=5, reset_after=5)

        await asyncio.sleep(1)
        state: RateLimitState = await rate_limiter.peek(key)
        assert state.limit == 10
        # ~1 request/sec drains (60 per minute), so about one slot is freed.
        assert 6 - state.remaining <= 1
        assert 4 - state.reset_after <= 4
91 |
--------------------------------------------------------------------------------
/tests/asyncio/rate_limiter/test_sliding_window.py:
--------------------------------------------------------------------------------
1 | import math
2 | from datetime import timedelta
3 | from typing import Any, Callable, Generator, List
4 |
5 | import pytest
6 |
7 | from throttled.asyncio import (
8 | BaseRateLimiter,
9 | BaseStore,
10 | Quota,
11 | Rate,
12 | RateLimiterRegistry,
13 | RateLimitResult,
14 | RateLimitState,
15 | constants,
16 | per_min,
17 | types,
18 | utils,
19 | )
20 |
21 | from ...rate_limiter import parametrizes
22 | from ...rate_limiter.test_sliding_window import assert_rate_limit_result
23 |
24 |
@pytest.fixture
def rate_limiter_constructor(
    store: BaseStore,
) -> Generator[Callable[[Quota], BaseRateLimiter], Any, None]:
    """Yield a factory producing sliding-window limiters over ``store``."""

    def _factory(quota: Quota) -> BaseRateLimiter:
        cls = RateLimiterRegistry.get(constants.RateLimiterType.SLIDING_WINDOW.value)
        return cls(quota, store)

    yield _factory
35 |
36 |
@pytest.mark.asyncio
class TestSlidingWindowRateLimiter:
    """Async tests for the sliding-window algorithm."""

    async def test_limit(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Replay the shared cases and inspect the raw per-window counter."""
        limit: int = 5
        period: int = 60
        quota: Quota = Quota(Rate(period=timedelta(minutes=1), limit=limit))

        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        # The store key embeds the current window index (now // period).
        store_key: str = (
            f"throttled:v1:sliding_window:key:period:{utils.now_sec() // period}"
        )
        assert await rate_limiter._store.exists(store_key) is False

        for case in parametrizes.SLIDING_WINDOW_LIMIT_CASES:
            result: RateLimitResult = await rate_limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case["limited"], case["remaining"], quota, result)
            assert await rate_limiter._store.get(store_key) == case["count"]
            if "ttl" in case:
                assert await rate_limiter._store.ttl(store_key) == case["ttl"]

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    async def test_limit__concurrent(
        self,
        benchmark: utils.Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Admitted count stays within the window limit plus refill slack."""

        def _callback(elapsed: types.TimeLikeValueT, *args, **kwargs):
            accessed_num: int = requests_num - sum(results)
            limit: int = min(requests_num, quota.get_limit())
            # Allow for quota refilled while the batch was still running.
            assert abs(accessed_num - limit) <= math.ceil(
                (elapsed + 2) * quota.fill_rate
            )

        async def _task():
            result = await rate_limiter.limit("key")
            return result.limited

        # CONSISTENCY FIX: use the async context manager, matching the other
        # asyncio suites (GCRA / token bucket use ``async with utils.Timer``).
        async with utils.Timer(callback=_callback):
            rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            results: List[bool] = await benchmark.async_concurrent(
                task=_task, batch=requests_num
            )

    async def test_peek(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Peek reports window state without consuming quota."""
        key: str = "key"
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(per_min(1))
        assert await rate_limiter.peek(key) == RateLimitState(
            limit=1, remaining=1, reset_after=60
        )
        await rate_limiter.limit(key)
        assert await rate_limiter.peek(key) == RateLimitState(
            limit=1, remaining=0, reset_after=60
        )
97 |
--------------------------------------------------------------------------------
/tests/asyncio/rate_limiter/test_token_bucket.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Any, Callable, Generator, List
3 |
4 | import pytest
5 |
6 | from throttled.asyncio import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | RateLimiterRegistry,
11 | RateLimitResult,
12 | RateLimitState,
13 | constants,
14 | per_min,
15 | types,
16 | utils,
17 | )
18 |
19 | from ...rate_limiter import parametrizes
20 | from ...rate_limiter.test_token_bucket import assert_rate_limit_result
21 |
22 |
@pytest.fixture
def rate_limiter_constructor(
    store: BaseStore,
) -> Generator[Callable[[Quota], BaseRateLimiter], Any, None]:
    """Yield a factory producing token-bucket limiters over ``store``."""

    def _factory(quota: Quota) -> BaseRateLimiter:
        cls = RateLimiterRegistry.get(constants.RateLimiterType.TOKEN_BUCKET.value)
        return cls(quota, store)

    yield _factory
33 |
34 |
@pytest.mark.asyncio
class TestTokenBucketRateLimiter:
    """Async tests for the token-bucket algorithm."""

    async def test_limit(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Run the shared token-bucket scenario table in order."""
        quota: Quota = per_min(limit=60, burst=10)
        limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        for case in parametrizes.TOKEN_BUCKET_LIMIT_CASES:
            # Some steps pause so the bucket can refill before the request.
            if "sleep" in case:
                await asyncio.sleep(case["sleep"])

            outcome: RateLimitResult = await limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case, quota, outcome)

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    async def test_limit__concurrent(
        self,
        benchmark: utils.Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Admitted requests stay within burst plus tokens refilled meanwhile."""

        def _on_finish(elapsed: types.TimeLikeValueT, *args, **kwargs):
            admitted: int = requests_num - sum(outcomes)
            floor: int = min(requests_num, quota.get_limit())

            assert admitted >= floor
            # Tokens keep refilling while the batch runs; allow slack.
            assert admitted <= floor + (elapsed + 6) * quota.fill_rate

        async def _hit():
            return (await limiter.limit("key")).limited

        async with utils.Timer(callback=_on_finish):
            limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            outcomes: List[bool] = await benchmark.async_concurrent(
                task=_hit, batch=requests_num
            )

    async def test_peek(
        self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]
    ):
        """Peek reports bucket state without consuming tokens."""
        key: str = "key"
        limiter: BaseRateLimiter = rate_limiter_constructor(per_min(limit=60, burst=10))

        assert await limiter.peek(key) == RateLimitState(
            limit=10, remaining=10, reset_after=0
        )

        await limiter.limit(key, cost=5)
        assert await limiter.peek(key) == RateLimitState(
            limit=10, remaining=5, reset_after=5
        )

        # 60/min refills one token per second.
        await asyncio.sleep(1)
        assert await limiter.peek(key) == RateLimitState(
            limit=10, remaining=6, reset_after=4
        )
92 |
--------------------------------------------------------------------------------
/tests/asyncio/store/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/asyncio/store/__init__.py
--------------------------------------------------------------------------------
/tests/asyncio/store/test_memory.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from throttled.asyncio import MemoryStore, constants
4 |
5 |
@pytest.fixture
def store() -> MemoryStore:
    """Provide a fresh in-memory store for each test."""
    return MemoryStore()
9 |
10 |
@pytest.mark.asyncio
class TestMemoryStore:
    """Memory-backend-specific behavior not covered by the generic suite."""

    async def test_set__overflow(self, store: MemoryStore):
        """Writing one key past ``max_size`` evicts the first-inserted key."""
        timeout: int = 10
        capacity: int = store._backend.max_size
        for idx in range(capacity + 1):
            await store.set(str(idx), idx, timeout)

        for idx in range(capacity + 1):
            key: str = str(idx)
            # Only key "0" should have been evicted by the overflow.
            should_exist: bool = idx != 0
            if should_exist:
                assert await store.ttl(key) <= timeout
            else:
                assert await store.ttl(key) == constants.STORE_TTL_STATE_NOT_EXIST
            assert await store.exists(key) is should_exist
28 |
--------------------------------------------------------------------------------
/tests/asyncio/store/test_store.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional, Type
2 |
3 | import pytest
4 |
5 | from throttled.asyncio import BaseStore, constants, exceptions, types
6 |
7 | from ...store import parametrizes
8 |
9 |
@pytest.mark.asyncio
class TestStore:
    """Backend-agnostic contract tests for ``BaseStore`` implementations.

    The ``store`` fixture parametrizes these tests over every configured
    backend (memory and Redis — see ``tests/conftest.py``).
    """

    @parametrizes.STORE_EXISTS_SET_BEFORE
    @parametrizes.STORE_EXISTS_KV
    async def test_exists(
        self,
        store: BaseStore,
        set_before: bool,
        key: types.KeyT,
        # NOTE: annotation fixed — was ``[types.StoreValueT]`` (a list literal).
        value: types.StoreValueT,
    ):
        """``exists``/``get`` reflect whether the key was previously set."""
        if set_before:
            await store.set(key, value, 1)

        assert await store.exists(key) is set_before
        # ``(None, value)[set_before]`` selects ``None`` unless the key was set.
        assert await store.get(key) == (None, value)[set_before]

    @parametrizes.STORE_TTL_KEY
    @parametrizes.STORE_TTL_TIMEOUT
    async def test_ttl(self, store: BaseStore, key: types.KeyT, timeout: int):
        """A freshly set key reports exactly the timeout it was given."""
        await store.set(key, 1, timeout)
        assert timeout == await store.ttl(key)

    async def test_ttl__not_exist(self, store: BaseStore):
        """Missing keys report the dedicated NOT_EXIST TTL sentinel."""
        assert await store.ttl("key") == constants.STORE_TTL_STATE_NOT_EXIST

    async def test_ttl__not_ttl(self, store: BaseStore):
        """Hash keys created without an expiry report the NOT_TTL sentinel."""
        await store.hset("name", "key", 1)
        assert await store.ttl("name") == constants.STORE_TTL_STATE_NOT_TTL

    @parametrizes.STORE_SET_KEY_TIMEOUT
    async def test_set(self, store: BaseStore, key: types.KeyT, timeout: int):
        """``set`` applies the requested timeout verbatim."""
        await store.set(key, 1, timeout)
        assert timeout == await store.ttl(key)

    @parametrizes.store_set_raise_parametrize(exceptions.DataError)
    async def test_set__raise(
        self,
        store: BaseStore,
        key: types.KeyT,
        timeout: Any,
        exc: Type[exceptions.BaseThrottledError],
        match: str,
    ):
        """Invalid timeouts raise ``DataError`` with the expected message."""
        with pytest.raises(exc, match=match):
            await store.set(key, 1, timeout)

    @parametrizes.STORE_GET_SET_BEFORE
    @parametrizes.STORE_GET_KV
    async def test_get(
        self,
        store: BaseStore,
        set_before: bool,
        key: types.KeyT,
        value: types.StoreValueT,
    ):
        """``get`` returns the stored value, or ``None`` for unknown keys."""
        if set_before:
            await store.set(key, value, 1)
        assert await store.get(key) == (None, value)[set_before]

    @parametrizes.STORE_HSET_PARAMETRIZE
    async def test_hset(
        self,
        store: BaseStore,
        name: types.KeyT,
        expect: Dict[types.KeyT, types.StoreValueT],
        key: Optional[types.KeyT],
        value: Optional[types.StoreValueT],
        mapping: Optional[Dict[types.KeyT, types.StoreValueT]],
    ):
        """``hset`` creates a hash with no TTL until ``expire`` is called."""
        assert await store.exists(name) is False
        assert await store.ttl(name) == constants.STORE_TTL_STATE_NOT_EXIST

        await store.hset(name, key, value, mapping)
        assert await store.exists(name) is True
        assert await store.ttl(name) == constants.STORE_TTL_STATE_NOT_TTL

        await store.expire(name, 1)
        assert await store.ttl(name) == 1
        assert await store.hgetall(name) == expect

    @parametrizes.store_hset_raise_parametrize(exceptions.DataError)
    async def test_hset__raise(
        self,
        store: BaseStore,
        params: Dict[str, Any],
        exc: Type[exceptions.BaseThrottledError],
        match: str,
    ):
        """Invalid ``hset`` argument combinations raise ``DataError``."""
        with pytest.raises(exc, match=match):
            await store.hset(**params)

    @parametrizes.STORE_HSET_OVERWRITE_PARAMETRIZE
    async def test_hset__overwrite(
        self,
        store: BaseStore,
        params_list: List[Dict[str, Any]],
        expected_results: List[Dict[types.KeyT, types.StoreValueT]],
    ):
        """Successive ``hset`` calls update the hash to each expected state."""
        key: str = "key"
        for params, expected_result in zip(params_list, expected_results):
            await store.hset(key, **params)
            assert await store.hgetall(key) == expected_result

    @parametrizes.STORE_HGETALL_PARAMETRIZE
    async def test_hgetall(
        self,
        store: BaseStore,
        params_list: List[Dict[str, Any]],
        expected_results: List[Dict[types.KeyT, types.StoreValueT]],
    ):
        """``hgetall`` mirrors the cumulative effect of prior ``hset`` calls."""
        for params, expected_result in zip(params_list, expected_results):
            await store.hset("name", **params)
            assert await store.hgetall("name") == expected_result
124 |
--------------------------------------------------------------------------------
/tests/asyncio/test_throttled.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 | from typing import Any, Callable, Coroutine, Dict
3 |
4 | import pytest
5 |
6 | from throttled.asyncio import (
7 | RateLimiterType,
8 | Throttled,
9 | exceptions,
10 | per_sec,
11 | rate_limiter,
12 | store,
13 | types,
14 | utils,
15 | )
16 |
17 |
@pytest.fixture
def decorated_demo() -> Callable[[int, int], Coroutine]:
    """Yield an async adder throttled to one call per minute."""

    @Throttled(
        key="/api/product",
        using=RateLimiterType.FIXED_WINDOW.value,
        quota=rate_limiter.per_min(1),
        store=store.MemoryStore(),
    )
    async def demo(left: int, right: int) -> int:
        # Trivial payload; the interesting part is the decorator.
        return left + right

    yield demo
30 |
31 |
@pytest.mark.asyncio
class TestThrottled:
    """End-to-end tests for the async ``Throttled`` entry points."""

    async def test_demo(self, decorated_demo: Callable[[int, int], Coroutine]) -> None:
        """The decorated function works once, then raises ``LimitedError``."""
        assert await decorated_demo(1, 2) == 3
        with pytest.raises(exceptions.LimitedError):
            await decorated_demo(2, 3)

    async def test_limit__timeout(self):
        """``timeout`` makes ``limit`` wait for quota instead of failing fast."""
        throttle: Throttled = Throttled(timeout=1, quota=per_sec(1))
        assert (await throttle.limit("key")).limited is False

        def _callback(
            left: float, right: float, elapsed: types.TimeLikeValueT, *args, **kwargs
        ):
            assert left <= elapsed < right

        # Quota exhausted: this call should wait ~1s until the next refill.
        async with utils.Timer(callback=partial(_callback, 1, 2)):
            assert (await throttle.limit("key")).limited is False

        # case: retry_after > timeout -> fail fast instead of waiting.
        async with utils.Timer(callback=partial(_callback, 0, 0.1)):
            assert (await throttle.limit("key", cost=2)).limited

        # case: per-call timeout (0.5s) overrides the constructor's and is
        # shorter than retry_after -> fail fast.
        async with utils.Timer(callback=partial(_callback, 0, 0.1)):
            assert (await throttle.limit("key", timeout=0.5)).limited

    async def test_enter(self):
        """Context-manager entry applies the quota and raises when limited."""
        construct_kwargs: Dict[str, Any] = {
            "key": "key",
            "quota": per_sec(1),
            "store": store.MemoryStore(),
        }
        throttle: Throttled = Throttled(**construct_kwargs)
        async with throttle as rate_limit_result:
            assert rate_limit_result.limited is False

        # BUGFIX: the previous ``try/except LimitedError`` passed silently if
        # no exception was raised; ``pytest.raises`` fails the test instead.
        with pytest.raises(exceptions.LimitedError) as exc_info:
            async with throttle:
                pass
        e = exc_info.value
        assert e.rate_limit_result.limited
        assert e.rate_limit_result.state.remaining == 0
        assert e.rate_limit_result.state.reset_after == 1
        assert e.rate_limit_result.state.retry_after == 1

        # With a waiting timeout, entry succeeds after the quota refills.
        async with Throttled(**construct_kwargs, timeout=1) as rate_limit_result:
            assert not rate_limit_result.limited
80 |
--------------------------------------------------------------------------------
/tests/benchmarks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/benchmarks/__init__.py
--------------------------------------------------------------------------------
/tests/benchmarks/test_throttled.py:
--------------------------------------------------------------------------------
1 | import threading
2 | from typing import Any, Dict, Generator, List
3 |
4 | import pytest
5 | import redis
6 | from redis import Redis
7 |
8 | from throttled import (
9 | BaseStore,
10 | MemoryStore,
11 | Quota,
12 | RateLimiterType,
13 | RedisStore,
14 | Throttled,
15 | per_sec,
16 | )
17 | from throttled.constants import StoreType
18 | from throttled.types import RateLimiterTypeT
19 | from throttled.utils import Benchmark
20 |
# Redis instance used by the benchmark suite (db 0 on localhost).
REDIS_URL: str = "redis://127.0.0.1:6379/0"

# Worker-thread count for the concurrent benchmark runs.
WORKERS: int = 8
24 |
25 |
def clear_redis(client: redis.Redis) -> None:
    """Delete every ``throttled*`` key; a no-op when none exist.

    ``Redis.delete`` sends a DEL command, which requires at least one key,
    so guard against an empty match list.
    """
    keys: List[str] = client.keys("throttled*")
    if keys:
        client.delete(*keys)
29 |
30 |
def redis_baseline(client: redis.Redis):
    """Baseline op: one INCRBY round-trip, to compare limiter overhead against."""
    client.incrby("throttled:v2", 1)
33 |
34 |
def memory_baseline(dict_store: Dict[str, int]):
    """Baseline op: one unsynchronized dict read-modify-write increment."""
    counter_key = "throttled:v2"
    dict_store[counter_key] = dict_store.get(counter_key, 0) + 1
37 |
38 |
def memory_with_lock_baseline(lock: threading.RLock, dict_store: Dict[str, int]):
    """Baseline op: the dict increment guarded by an ``RLock``."""
    lock.acquire()
    try:
        memory_baseline(dict_store)
    finally:
        lock.release()
42 |
43 |
def call_api(throttle: Throttled) -> bool:
    """Hit ``/ping`` once and report whether the request was rejected."""
    return throttle.limit("/ping", cost=1).limited
47 |
48 |
@pytest.fixture(params=StoreType.choice())
def store(request) -> Generator[BaseStore, Any, None]:
    """Yield a memory- or real-Redis-backed store; clear Redis afterwards."""
    if request.param == StoreType.MEMORY.value:
        backend: BaseStore = MemoryStore()
    else:
        backend = RedisStore(server=REDIS_URL)

    yield backend

    # Only the Redis backend leaves persistent state behind.
    if request.param == StoreType.REDIS.value:
        clear_redis(backend._backend.get_client())
63 |
64 |
@pytest.fixture
def redis_client() -> Generator[Redis, Any, None]:
    """Yield a raw Redis client; clear benchmark keys on teardown."""
    client = redis.Redis.from_url(REDIS_URL)

    yield client

    clear_redis(client)
72 |
73 |
@pytest.mark.skip(reason="skip benchmarks")
class TestBenchmarkThrottled:
    """Throughput benchmarks; skipped by default via the class-level marker.

    The ``*_baseline`` tests measure the raw cost of the underlying store
    operation so that limiter overhead can be compared against it.
    """

    def test_memory_baseline__serial(self, benchmark: Benchmark):
        # Raw dict increment, single thread.
        benchmark.serial(memory_baseline, batch=500_000, dict_store={})

    def test_memory_baseline__concurrent(self, benchmark: Benchmark):
        # Lock-guarded dict increment across WORKERS threads.
        benchmark.concurrent(
            memory_with_lock_baseline,
            batch=100_000,
            workers=WORKERS,
            lock=threading.RLock(),
            dict_store={},
        )

    def test_redis_baseline__serial(
        self, benchmark: Benchmark, redis_client: redis.Redis
    ):
        # One INCRBY round-trip per iteration, single thread.
        benchmark.serial(redis_baseline, batch=100_000, client=redis_client)

    def test_redis_baseline__concurrent(
        self, benchmark: Benchmark, redis_client: redis.Redis
    ):
        # Same INCRBY op issued from WORKERS threads.
        benchmark.concurrent(
            redis_baseline, batch=100_000, workers=WORKERS, client=redis_client
        )

    @pytest.mark.parametrize("using", RateLimiterType.choice())
    @pytest.mark.parametrize("quota", [per_sec(1_000)])
    def test_limit__serial(
        self,
        benchmark: Benchmark,
        store: BaseStore,
        using: RateLimiterTypeT,
        quota: Quota,
    ):
        # Full limiter path, every algorithm x every store backend.
        throttle = Throttled(using=using, quota=quota, store=store)
        benchmark.serial(call_api, batch=100_000, throttle=throttle)

    @pytest.mark.parametrize("using", RateLimiterType.choice())
    @pytest.mark.parametrize("quota", [per_sec(1_000)])
    def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        store: BaseStore,
        using: RateLimiterTypeT,
        quota: Quota,
    ):
        # Same matrix as above, but issued from WORKERS threads.
        throttle = Throttled(using=using, quota=quota, store=store)
        benchmark.concurrent(call_api, batch=100_000, workers=WORKERS, throttle=throttle)
123 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Generator
2 |
3 | import pytest
4 | from fakeredis import FakeConnection
5 |
6 | from throttled import BaseStore, MemoryStore, RedisStore
7 | from throttled.constants import StoreType
8 | from throttled.utils import Benchmark
9 |
10 |
@pytest.fixture(params=[StoreType.MEMORY.value, StoreType.REDIS.value])
def store(request) -> Generator[BaseStore, Any, None]:
    """Yield a memory store or a fakeredis-backed Redis store per backend."""
    if request.param == StoreType.MEMORY.value:
        instance: BaseStore = MemoryStore()
    else:
        # fakeredis keeps the suite hermetic: no real Redis server needed.
        instance = RedisStore(
            options={
                "REDIS_CLIENT_CLASS": "fakeredis.FakeRedis",
                "CONNECTION_POOL_KWARGS": {"connection_class": FakeConnection},
            }
        )

    yield instance

    if request.param == StoreType.REDIS.value:
        instance._backend.get_client().flushall()
30 |
31 |
@pytest.fixture(scope="class")
def benchmark() -> Benchmark:
    """Share a single Benchmark helper across each test class."""
    return Benchmark()
35 |
--------------------------------------------------------------------------------
/tests/rate_limiter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/rate_limiter/__init__.py
--------------------------------------------------------------------------------
/tests/rate_limiter/parametrizes.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List
2 |
3 | import pytest
4 |
5 | from throttled import per_min
6 |
# Parametrize grids shared by every algorithm's ``test_limit__concurrent``.
LIMIT_C_QUOTA = pytest.mark.parametrize(
    "quota",
    [per_min(1, 1), per_min(10, 10), per_min(100, 100), per_min(1_000, 1_000)],
)

LIMIT_C_REQUESTS_NUM = pytest.mark.parametrize("requests_num", [10, 100, 1_000, 10_000])


# Sequential scenarios for ``test_limit``.  Each dict describes one
# ``limit()`` call: ``cost`` is the request weight; the remaining keys are
# the expected result fields.  ``sleep`` (where present) pauses before the
# call so the limiter can drain/refill.
FIXED_WINDOW_LIMIT_CASES: List[Dict[str, Any]] = [
    {"cost": 0, "limited": False, "remaining": 5, "count": 0},
    {"cost": 1, "limited": False, "remaining": 4, "count": 1},
    {"cost": 4, "limited": False, "remaining": 0, "count": 5},
    # Note: the window counter still grows on rejected requests (5 -> 9).
    {"cost": 4, "limited": True, "remaining": 0, "count": 9},
    {"cost": 0, "limited": False, "remaining": 0, "count": 9},
]

GCRA_LIMIT_CASES: List[Dict[str, Any]] = [
    {"cost": 0, "limited": False, "remaining": 10},
    {"cost": 1, "limited": False, "remaining": 9},
    {"cost": 5, "limited": False, "remaining": 5, "sleep": 1},
    {"cost": 5, "limited": False, "remaining": 0},
    {"cost": 1, "limited": True, "remaining": 0},
    {"cost": 0, "limited": False, "remaining": 0},
]

LEAKING_BUCKET_LIMIT_CASES: List[Dict[str, Any]] = [
    {"cost": 0, "limited": False, "remaining": 10},
    {"cost": 1, "limited": False, "remaining": 9},
    {"cost": 10, "limited": True, "remaining": 9, "retry_after": 1},
    {"cost": 5, "limited": False, "remaining": 5, "sleep": 1},
    {"cost": 8, "limited": True, "remaining": 5, "retry_after": 3},
    {"cost": 5, "limited": False, "remaining": 0},
    {"cost": 1, "limited": True, "remaining": 0, "retry_after": 1},
    {"cost": 0, "limited": False, "remaining": 0},
]

# The token-bucket tests reuse the same scenario table.
TOKEN_BUCKET_LIMIT_CASES: List[Dict[str, Any]] = LEAKING_BUCKET_LIMIT_CASES

SLIDING_WINDOW_LIMIT_CASES: List[Dict[str, Any]] = [
    {"cost": 0, "limited": False, "remaining": 5, "count": 0, "ttl": 3 * 60},
    {"cost": 1, "limited": False, "remaining": 4, "count": 1},
    {"cost": 4, "limited": False, "remaining": 0, "count": 5},
    # Unlike the fixed window, rejected requests do not grow the counter.
    {"cost": 4, "limited": True, "remaining": 0, "count": 5},
    {"cost": 0, "limited": False, "remaining": 0, "count": 5},
]
52 |
--------------------------------------------------------------------------------
/tests/rate_limiter/test_base.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 | from typing import Any, Callable, Dict, Optional
3 |
4 | import pytest
5 |
6 | from throttled import Quota, rate_limiter
7 |
8 |
class TestQuota:
    """Tests for the ``per_xx`` quota factory helpers."""

    @pytest.mark.parametrize(
        "per_xx,constructor_kwargs,expect",
        [
            [rate_limiter.per_sec, {"limit": 10}, {"limit": 10, "burst": 10, "sec": 1}],
            [
                rate_limiter.per_min,
                {"limit": 10},
                {"limit": 10, "burst": 10, "sec": 60},
            ],
            [
                rate_limiter.per_hour,
                {"limit": 10},
                {"limit": 10, "burst": 10, "sec": 3600},
            ],
            [
                rate_limiter.per_day,
                {"limit": 10},
                {"limit": 10, "burst": 10, "sec": 86400},
            ],
            [
                rate_limiter.per_week,
                {"limit": 10},
                {"limit": 10, "burst": 10, "sec": 604800},
            ],
            # burst defaults to limit unless passed explicitly.
            [
                rate_limiter.per_sec,
                {"limit": 10, "burst": 5},
                {"limit": 10, "burst": 5, "sec": 1},
            ],
        ],
    )
    def test_per_xx(
        self,
        per_xx: Callable[[int, Optional[int]], Quota],
        constructor_kwargs: Dict[str, Any],
        expect: Dict[str, Any],
    ):
        """Each helper yields the expected limit/burst/period triple."""
        quota: Quota = per_xx(**constructor_kwargs)
        assert quota.burst == expect["burst"]
        assert quota.get_limit() == expect["limit"]
        assert quota.get_period_sec() == expect["sec"]

    def test_per_duration(self):
        """``per_duration`` accepts an arbitrary ``timedelta`` period."""
        quota: Quota = rate_limiter.per_duration(
            timedelta(minutes=2), limit=120, burst=150
        )
        assert quota.burst == 150
        assert quota.get_limit() == 120
        assert quota.get_period_sec() == 120
59 |
--------------------------------------------------------------------------------
/tests/rate_limiter/test_fixed_window.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 | from typing import Any, Callable, Generator, List
3 |
4 | import pytest
5 |
6 | from throttled import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | Rate,
11 | RateLimiterRegistry,
12 | RateLimitResult,
13 | RateLimitState,
14 | per_min,
15 | )
16 | from throttled.constants import RateLimiterType
17 | from throttled.utils import Benchmark, now_sec
18 |
19 | from . import parametrizes
20 |
21 |
@pytest.fixture
def rate_limiter_constructor(
    store: BaseStore,
) -> Generator[Callable[[Quota], BaseRateLimiter], Any, None]:
    """Yield a factory producing fixed-window limiters over ``store``."""

    def _factory(quota: Quota) -> BaseRateLimiter:
        return RateLimiterRegistry.get(RateLimiterType.FIXED_WINDOW.value)(quota, store)

    yield _factory
30 |
31 |
def assert_rate_limit_result(
    limited: bool, remaining: int, quota: Quota, result: RateLimitResult
):
    """Check ``result`` against expectations for a fixed-window limiter."""
    state = result.state
    assert result.limited == limited
    assert state.limit == quota.get_limit()
    assert state.remaining == remaining

    # The window resets on wall-clock period boundaries.
    period: int = quota.get_period_sec()
    assert state.reset_after == period - (now_sec() % period)
    if result.limited:
        assert state.retry_after == state.reset_after
43 |
44 |
class TestFixedWindowRateLimiter:
    """Synchronous tests for the fixed-window algorithm."""

    def test_limit(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """Replay the shared cases and verify the raw window counter."""
        limit: int = 5
        period: int = 60
        quota: Quota = Quota(Rate(period=timedelta(minutes=1), limit=limit))
        assert quota.get_limit() == limit
        assert quota.get_period_sec() == period

        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        # The store key embeds the current window index (now // period).
        store_key: str = f"throttled:v1:fixed_window:key:period:{now_sec() // period}"
        assert rate_limiter._store.exists(store_key) is False

        # Loop manually: a fixture cannot participate in
        # pytest.mark.parametrize scope, and the limiter must keep its
        # state across the whole case sequence.
        for case in parametrizes.FIXED_WINDOW_LIMIT_CASES:
            result: RateLimitResult = rate_limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case["limited"], case["remaining"], quota, result)
            assert rate_limiter._store.get(store_key) == case["count"]

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Concurrent admissions stay within one window limit (two at a boundary)."""
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        results: List[bool] = benchmark.concurrent(
            task=lambda: rate_limiter.limit("key").limited, batch=requests_num
        )

        accessed_num: int = requests_num - sum(results)
        limit: int = min(requests_num, quota.get_limit())
        # Crossing a period boundary mid-batch can admit up to 2x the limit.
        assert limit <= accessed_num <= 2 * limit

    def test_peek(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """Peek mirrors the window state without consuming quota."""
        key: str = "key"
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(per_min(1))

        def _assert(_state: RateLimitState):
            # reset_after should track the remainder of the current minute.
            assert _state.limit == 1
            assert _state.reset_after - (60 - (now_sec() % 60)) <= 1

        state: RateLimitState = rate_limiter.peek(key)
        _assert(state)
        assert state.remaining == 1

        rate_limiter.limit(key)

        state: RateLimitState = rate_limiter.peek(key)
        assert state.remaining == 0
        _assert(state)
99 |
--------------------------------------------------------------------------------
/tests/rate_limiter/test_gcra.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Callable, List
3 |
4 | import pytest
5 |
6 | from throttled import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | RateLimiterRegistry,
11 | RateLimitResult,
12 | RateLimitState,
13 | per_min,
14 | )
15 | from throttled.constants import RateLimiterType
16 | from throttled.types import TimeLikeValueT
17 | from throttled.utils import Benchmark, Timer
18 |
19 | from . import parametrizes
20 |
21 |
@pytest.fixture
def rate_limiter_constructor(store: BaseStore) -> Callable[[Quota], BaseRateLimiter]:
    """Return a factory building GCRA limiters over the shared store."""

    def _factory(quota: Quota) -> BaseRateLimiter:
        return RateLimiterRegistry.get(RateLimiterType.GCRA.value)(quota, store)

    return _factory
28 |
29 |
def assert_rate_limit_result(
    limited: bool, remaining: int, quota: Quota, result: RateLimitResult
):
    """Validate a GCRA result, tolerating sub-second clock drift."""
    state = result.state
    assert result.limited == limited
    assert state.limit == quota.burst
    # GCRA tracks a theoretical arrival time, so values drift by < 1 unit.
    assert remaining - state.remaining <= 1
    assert quota.burst - remaining - state.reset_after < 0.1

    if result.limited:
        assert 1 - state.retry_after < 0.1
    else:
        assert state.retry_after == 0
42 |
43 |
class TestGCRARateLimiter:
    """Behavioral tests for the GCRA rate limiter."""

    def test_limit(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """Replay the table-driven cases against a 60/min, burst-10 quota."""
        quota: Quota = per_min(limit=60, burst=10)
        limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        for case in parametrizes.GCRA_LIMIT_CASES:
            # Some cases advance the clock before issuing the request.
            if "sleep" in case:
                time.sleep(case["sleep"])

            outcome: RateLimitResult = limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case["limited"], case["remaining"], quota, outcome)

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Concurrent requests never exceed burst plus refill during the run."""

        def _on_done(elapsed: TimeLikeValueT, *args, **kwargs):
            allowed: int = requests_num - sum(outcomes)
            floor: int = min(requests_num, quota.get_limit())
            refill_rate: float = quota.get_limit() / quota.get_period_sec()
            # At least the floor gets through; at most floor plus whatever
            # could refill while the benchmark ran (2s of slack).
            assert floor <= allowed <= floor + (elapsed + 2) * refill_rate

        with Timer(callback=_on_done):
            limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            outcomes: List[bool] = benchmark.concurrent(
                task=lambda: limiter.limit("key").limited, batch=requests_num
            )

    def test_peek(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """peek() tracks consumption and 1s of refill without side effects."""
        key: str = "key"
        quota: Quota = per_min(limit=60, burst=10)
        limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        state: RateLimitState = limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=10, reset_after=0)

        limiter.limit(key, cost=5)
        state = limiter.peek(key)
        assert state.limit == 10 and state.remaining == 5
        assert 5 - state.reset_after < 0.1

        time.sleep(1)
        state = limiter.peek(key)
        assert state.limit == 10 and state.remaining == 6
        assert 4 - state.reset_after < 0.1

        limiter.limit(key, cost=6)
        state = limiter.peek(key)
        assert state.remaining == 0
        assert 10 - state.reset_after < 0.1
98 |
--------------------------------------------------------------------------------
/tests/rate_limiter/test_leaking_bucket.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Any, Callable, Dict, List
3 |
4 | import pytest
5 |
6 | from throttled import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | RateLimiterRegistry,
11 | RateLimitResult,
12 | RateLimitState,
13 | per_min,
14 | )
15 | from throttled.constants import RateLimiterType
16 | from throttled.types import TimeLikeValueT
17 | from throttled.utils import Benchmark, Timer
18 |
19 | from . import parametrizes
20 |
21 |
@pytest.fixture
def rate_limiter_constructor(store: BaseStore) -> Callable[[Quota], BaseRateLimiter]:
    """Return a factory producing leaking bucket limiters bound to ``store``."""

    def _constructor(quota: Quota) -> BaseRateLimiter:
        limiter_cls = RateLimiterRegistry.get(RateLimiterType.LEAKING_BUCKET.value)
        return limiter_cls(quota, store)

    return _constructor
30 |
31 |
def assert_rate_limit_result(
    case: Dict[str, Any], quota: Quota, result: RateLimitResult
):
    """Check *result* against the exact expectations recorded in *case*."""
    state = result.state
    assert result.limited == case["limited"]
    assert state.limit == quota.burst
    assert state.remaining == case["remaining"]
    # Drain time equals the number of occupied slots in the bucket.
    assert state.reset_after == quota.burst - case["remaining"]
    assert state.retry_after == case.get("retry_after", 0)
40 |
41 |
class TestLeakingBucketRateLimiter:
    """Behavioral tests for the leaking bucket rate limiter."""

    def test_limit(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """Replay the table-driven cases against a 60/min, burst-10 quota."""
        quota: Quota = per_min(limit=60, burst=10)
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        for case in parametrizes.LEAKING_BUCKET_LIMIT_CASES:
            # Some cases advance the clock so the bucket can drain first.
            if "sleep" in case:
                time.sleep(case["sleep"])

            result: RateLimitResult = rate_limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case, quota, result)

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Concurrent requests never exceed burst plus drain during the run."""

        def _callback(elapsed: TimeLikeValueT, *args, **kwargs):
            accessed_num: int = requests_num - sum(results)
            limit: int = min(requests_num, quota.get_limit())
            rate: float = quota.get_limit() / quota.get_period_sec()
            # Upper bound includes 5s of slack for scheduling jitter.
            assert limit <= accessed_num <= limit + (elapsed + 5) * rate

        with Timer(callback=_callback):
            rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            results: List[bool] = benchmark.concurrent(
                task=lambda: rate_limiter.limit("key").limited, batch=requests_num
            )

    def test_peek(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """peek() reflects the bucket level without consuming capacity."""
        key: str = "key"
        quota: Quota = per_min(limit=60, burst=10)
        rate_limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        state: RateLimitState = rate_limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=10, reset_after=0)

        rate_limiter.limit(key, cost=5)
        state = rate_limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=5, reset_after=5)

        time.sleep(1)
        state = rate_limiter.peek(key)
        assert state.limit == 10
        assert 6 - state.remaining <= 1
        # BUG FIX: the original asserted ``4 - state.reset_after <= 4``,
        # which is vacuously true for any reset_after >= 0. Use the same
        # 1-unit tolerance as the ``remaining`` check above.
        assert 4 - state.reset_after <= 1
91 |
--------------------------------------------------------------------------------
/tests/rate_limiter/test_sliding_window.py:
--------------------------------------------------------------------------------
1 | import math
2 | from datetime import timedelta
3 | from typing import Any, Callable, Generator, List
4 |
5 | import pytest
6 |
7 | from throttled import (
8 | BaseRateLimiter,
9 | BaseStore,
10 | Quota,
11 | Rate,
12 | RateLimiterRegistry,
13 | RateLimitResult,
14 | RateLimitState,
15 | per_min,
16 | )
17 | from throttled.constants import RateLimiterType
18 | from throttled.types import TimeLikeValueT
19 | from throttled.utils import Benchmark, Timer, now_sec
20 |
21 | from . import parametrizes
22 |
23 |
@pytest.fixture
def rate_limiter_constructor(
    store: BaseStore,
) -> Generator[Callable[[Quota], BaseRateLimiter], Any, None]:
    """Yield a factory producing sliding window limiters bound to ``store``."""

    def _constructor(quota: Quota) -> BaseRateLimiter:
        limiter_cls = RateLimiterRegistry.get(RateLimiterType.SLIDING_WINDOW.value)
        return limiter_cls(quota, store)

    yield _constructor
34 |
35 |
def assert_rate_limit_result(
    limited: bool, remaining: int, quota: Quota, result: RateLimitResult
):
    """Check *result* against exact expected sliding window values."""
    state = result.state
    assert result.limited == limited
    assert state.limit == quota.get_limit()
    assert state.remaining == remaining
    assert state.reset_after == quota.get_period_sec()
43 |
44 |
class TestSlidingWindowRateLimiter:
    """Behavioral tests for the sliding window rate limiter."""

    def test_limit(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """Table-driven cases, also checking the backing store key directly."""
        limit: int = 5
        period: int = 60
        quota: Quota = Quota(Rate(period=timedelta(minutes=1), limit=limit))
        assert quota.get_limit() == limit
        assert quota.get_period_sec() == period

        limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        # The backing key embeds the index of the current window.
        store_key: str = f"throttled:v1:sliding_window:key:period:{now_sec() // period}"
        assert limiter._store.exists(store_key) is False

        for case in parametrizes.SLIDING_WINDOW_LIMIT_CASES:
            outcome: RateLimitResult = limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case["limited"], case["remaining"], quota, outcome)
            assert limiter._store.get(store_key) == case["count"]
            if "ttl" in case:
                assert limiter._store.ttl(store_key) == case["ttl"]

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Allowed request count stays within fill-rate drift of the limit."""

        def _on_done(elapsed: TimeLikeValueT, *args, **kwargs):
            allowed: int = requests_num - sum(outcomes)
            floor: int = min(requests_num, quota.get_limit())
            # Allow drift proportional to the fill rate over elapsed time.
            assert abs(allowed - floor) <= math.ceil((elapsed + 2) * quota.fill_rate)

        with Timer(callback=_on_done):
            limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            outcomes: List[bool] = benchmark.concurrent(
                task=lambda: limiter.limit("key").limited, batch=requests_num
            )

    def test_peek(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """peek() shows the full window before and zero remaining after use."""
        key: str = "key"
        limiter: BaseRateLimiter = rate_limiter_constructor(per_min(1))

        fresh = RateLimitState(limit=1, remaining=1, reset_after=60)
        assert limiter.peek(key) == fresh

        limiter.limit(key)
        drained = RateLimitState(limit=1, remaining=0, reset_after=60)
        assert limiter.peek(key) == drained
96 |
--------------------------------------------------------------------------------
/tests/rate_limiter/test_token_bucket.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Any, Callable, Dict, Generator
3 |
4 | import pytest
5 |
6 | from throttled import (
7 | BaseRateLimiter,
8 | BaseStore,
9 | Quota,
10 | RateLimiterRegistry,
11 | RateLimitResult,
12 | RateLimitState,
13 | per_min,
14 | )
15 | from throttled.constants import RateLimiterType
16 | from throttled.types import TimeLikeValueT
17 | from throttled.utils import Benchmark, Timer
18 |
19 | from . import parametrizes
20 |
21 |
@pytest.fixture
def rate_limiter_constructor(
    store: BaseStore,
) -> Generator[Callable[[Quota], BaseRateLimiter], Any, None]:
    """Yield a factory producing token bucket limiters bound to ``store``."""

    def _constructor(quota: Quota) -> BaseRateLimiter:
        limiter_cls = RateLimiterRegistry.get(RateLimiterType.TOKEN_BUCKET.value)
        return limiter_cls(quota, store)

    yield _constructor
30 |
31 |
def assert_rate_limit_result(
    case: Dict[str, Any], quota: Quota, result: RateLimitResult
):
    """Check *result* against the exact expectations recorded in *case*."""
    state = result.state
    assert result.limited == case["limited"]
    assert state.limit == quota.burst
    assert state.remaining == case["remaining"]
    # Refill time equals the number of tokens missing from the bucket.
    assert state.reset_after == quota.burst - case["remaining"]
    assert state.retry_after == case.get("retry_after", 0)
40 |
41 |
class TestTokenBucketRateLimiter:
    """Behavioral tests for the token bucket rate limiter."""

    def test_limit(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """Replay the table-driven cases against a 60/min, burst-10 quota."""
        quota: Quota = per_min(limit=60, burst=10)
        limiter: BaseRateLimiter = rate_limiter_constructor(quota)
        for case in parametrizes.TOKEN_BUCKET_LIMIT_CASES:
            # Some cases advance the clock so tokens can refill first.
            if "sleep" in case:
                time.sleep(case["sleep"])

            outcome: RateLimitResult = limiter.limit("key", cost=case["cost"])
            assert_rate_limit_result(case, quota, outcome)

    @parametrizes.LIMIT_C_QUOTA
    @parametrizes.LIMIT_C_REQUESTS_NUM
    def test_limit__concurrent(
        self,
        benchmark: Benchmark,
        rate_limiter_constructor: Callable[[Quota], BaseRateLimiter],
        quota: Quota,
        requests_num: int,
    ):
        """Concurrent requests never exceed burst plus refill during the run."""

        def _on_done(elapsed: TimeLikeValueT, *args, **kwargs):
            allowed: int = requests_num - sum(outcomes)
            floor: int = min(requests_num, quota.get_limit())
            # Lower bound: the burst; upper bound: burst plus refill with
            # 6s of slack for scheduling jitter.
            assert allowed >= floor
            assert allowed <= floor + (elapsed + 6) * quota.fill_rate

        with Timer(callback=_on_done):
            limiter: BaseRateLimiter = rate_limiter_constructor(quota)
            outcomes = benchmark.concurrent(
                task=lambda: limiter.limit("key").limited, batch=requests_num
            )

    def test_peek(self, rate_limiter_constructor: Callable[[Quota], BaseRateLimiter]):
        """peek() tracks consumption and refill without consuming tokens."""
        key: str = "key"
        quota: Quota = per_min(limit=60, burst=10)
        limiter: BaseRateLimiter = rate_limiter_constructor(quota)

        state: RateLimitState = limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=10, reset_after=0)

        limiter.limit(key, cost=5)
        state = limiter.peek(key)
        assert state == RateLimitState(limit=10, remaining=5, reset_after=5)

        time.sleep(1)
        state = limiter.peek(key)
        # One second of refill restores one or two tokens depending on timing.
        assert state in [
            RateLimitState(limit=10, remaining=6, reset_after=4),
            RateLimitState(limit=10, remaining=7, reset_after=3),
        ]
93 |
--------------------------------------------------------------------------------
/tests/store/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/store/__init__.py
--------------------------------------------------------------------------------
/tests/store/parametrizes.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 | from typing import Type
3 |
4 | import pytest
5 |
# Whether the key should be written to the store before the assertion runs.
STORE_EXISTS_SET_BEFORE = pytest.mark.parametrize(
    "set_before", [True, False], ids=["set", "not set"]
)

# Single key/value pair used by the exists() tests.
STORE_EXISTS_KV = pytest.mark.parametrize("key, value", [("one", 1)], ids=["one"])

# Key used by the ttl() tests.
STORE_TTL_KEY = pytest.mark.parametrize("key", ["key"])

# Timeouts spanning one second up to one year, all as whole seconds.
STORE_TTL_TIMEOUT = pytest.mark.parametrize(
    "timeout",
    [
        int(timedelta(seconds=1).total_seconds()),
        int(timedelta(minutes=1).total_seconds()),
        int(timedelta(hours=1).total_seconds()),
        int(timedelta(days=1).total_seconds()),
        int(timedelta(weeks=1).total_seconds()),
        int(timedelta(days=30).total_seconds()),
        int(timedelta(days=365).total_seconds()),
    ],
)

# Key/timeout pair used by the set() happy-path test.
STORE_SET_KEY_TIMEOUT = pytest.mark.parametrize("key,timeout", [("one", 1)])
28 |
29 |
def store_set_raise_parametrize(data_error: Type[BaseException]):
    """Parametrize invalid ``set()`` timeouts; each must raise *data_error*."""
    invalid_timeouts = [0, -1, 0.1, "aaaa", timedelta(minutes=1)]
    return pytest.mark.parametrize(
        "key,timeout,exc,match",
        [["key", timeout, data_error, "Invalid timeout"] for timeout in invalid_timeouts],
        ids=["zero", "negative", "float", "string", "object"],
    )
42 |
43 |
# Whether the key should be written to the store before get() is checked.
STORE_GET_SET_BEFORE = pytest.mark.parametrize(
    "set_before", [True, False], ids=["set", "not set"]
)


# Key/value pairs covering numeric edge values and unusual key encodings
# (URLs, emoji, CJK text, long text, IPv4/IPv6 literals).
STORE_GET_KV = pytest.mark.parametrize(
    "key, value",
    [
        ("one", 1),
        ("two", 1e100),
        ("three", 1e-10),
        ("/product/?a=1#/////", 1),
        ("🐶", 0.1),
        ("?book=《活着》", 1),
        ("long text" * 1000, 1),
        ("127.0.0.1", 1),
        ("0000:0000:0000:0000:0000:FFFF:0CFF:0001", 1),
    ],
    ids=[
        "value(integer)",
        "value(big integer)",
        "value(float)",
        "key(url)",
        "key(emoji)",
        "key(zh)",
        "key(long text)",
        "key(IPv4)",
        "key(IPv6)",
    ],
)

# hset() inputs: hash name, expected final mapping, and the key/value/mapping
# arguments passed to the call.
STORE_HSET_PARAMETRIZE = pytest.mark.parametrize(
    "name,expect,key,value,mapping",
    [
        ["one", {"k1": 1}, "k1", 1, None],
        ["one", {"中文": 1}, "中文", 1, None],
        ["one", {"🐶": 1}, "🐶", 1, None],
        ["one", {"🐶": 1}, "🐶", 1, {}],
        ["one", {"🐶": 1, "k1": 1, "k2": 2}, "🐶", 1, {"k1": 1, "k2": 2}],
    ],
)

# Sequential hset() calls and the expected hash contents after each one;
# later writes to the same field overwrite earlier ones.
STORE_HSET_OVERWRITE_PARAMETRIZE = pytest.mark.parametrize(
    ("params_list", "expected_results"),
    [
        [
            [
                {"key": "k1", "value": 1},
                {"key": "k1", "value": 2},
                {"mapping": {"k1": 3}},
                {"mapping": {"k1": 1, "k2": 2}},
                {"key": "k3", "value": 3},
            ],
            [
                {"k1": 1},
                {"k1": 2},
                {"k1": 3},
                {"k1": 1, "k2": 2},
                {"k1": 1, "k2": 2, "k3": 3},
            ],
        ]
    ],
)


# Sequential hset() calls and the cumulative hgetall() snapshots.
STORE_HGETALL_PARAMETRIZE = pytest.mark.parametrize(
    "params_list, expected_results",
    [
        [
            [
                {"key": "k0", "value": 0},
                {"key": "k1", "value": 1},
                {"key": "k2", "value": 2},
            ],
            [{"k0": 0}, {"k0": 0, "k1": 1}, {"k0": 0, "k1": 1, "k2": 2}],
        ]
    ],
)
122 |
123 |
def store_hset_raise_parametrize(data_error: Type[BaseException]):
    """Parametrize ``hset()`` calls lacking key/value data; each must raise."""
    message = "hset must with key value pairs"
    return pytest.mark.parametrize(
        "params, exc, match",
        [
            [{"name": "key"}, data_error, message],
            [{"name": "key", "mapping": {}}, data_error, message],
        ],
    )
136 |
--------------------------------------------------------------------------------
/tests/store/test_memory.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from throttled import MemoryStore
4 | from throttled.constants import STORE_TTL_STATE_NOT_EXIST
5 |
6 |
@pytest.fixture
def store() -> MemoryStore:
    """Provide a fresh in-memory store for each test."""
    return MemoryStore()
10 |
11 |
class TestMemoryStore:
    """Tests specific to the in-memory store backend."""

    def test_set__overflow(self, store: MemoryStore):
        """Writing one key past capacity evicts the oldest entry ("0")."""
        timeout: int = 10
        capacity: int = store._backend.max_size
        for idx in range(capacity + 1):
            store.set(str(idx), idx, timeout)

        for idx in range(capacity + 1):
            key: str = str(idx)
            should_exist: bool = idx != 0
            if should_exist:
                assert store.ttl(key) <= timeout
            else:
                # The first key written was evicted to make room.
                assert store.ttl(key) == STORE_TTL_STATE_NOT_EXIST

            assert store.exists(key) is should_exist
28 |
--------------------------------------------------------------------------------
/tests/store/test_redis_pool.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Optional, Type
2 |
3 | import pytest
4 |
5 | from throttled.store import BaseConnectionFactory, get_connection_factory
6 |
7 |
class TestRedisPool:
    """Tests for connection factory resolution in ``throttled.store``."""

    def test_get_connection_factory(self):
        """The default path resolves to the built-in ConnectionFactory."""
        # NOTE(review): the variable is named ``cls`` but the
        # ``cls.__class__.__name__`` check suggests it is an instance.
        cls: BaseConnectionFactory = get_connection_factory()
        assert cls.__module__ == "throttled.store.redis_pool"
        assert cls.__class__.__name__ == "ConnectionFactory"

    @pytest.mark.parametrize(
        ("path", "options", "exc", "match"),
        [
            [
                None,
                {"CONNECTION_POOL_CLASS": "no-exists.redis.connection.ConnectionPool"},
                ImportError,
                # match is a regex pattern, so we need to escape the backslashes.
                'pip install "throttled-py\\[redis\\]"',
            ],
            [
                "no-exists.throttled.store.ConnectionFactory",
                None,
                ImportError,
                "No module named 'no-exists'",
            ],
            [
                "throttled.store.NotExistsConnectionFactory",
                None,
                ImportError,
                'does not define a "NotExistsConnectionFactory"',
            ],
            ["ABC", None, ImportError, "ABC doesn't look like a module path"],
        ],
        ids=[
            "ConnectionPool import error",
            "Module not found",
            "Class not found",
            "Invalid module path",
        ],
    )
    def test_get_connection_factory__raise(
        self,
        path: Optional[str],
        options: Optional[Dict[str, Any]],
        exc: Type[BaseException],
        match: str,
    ):
        """Bad factory paths or options raise ImportError with a clear message."""
        with pytest.raises(exc, match=match):
            get_connection_factory(path, options)
54 |
--------------------------------------------------------------------------------
/tests/store/test_store.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional, Type
2 |
3 | import pytest
4 |
5 | from throttled import BaseStore
6 | from throttled.constants import STORE_TTL_STATE_NOT_EXIST, STORE_TTL_STATE_NOT_TTL
7 | from throttled.exceptions import BaseThrottledError, DataError
8 | from throttled.types import KeyT, StoreValueT
9 |
10 | from . import parametrizes
11 |
12 |
class TestStore:
    """Backend-agnostic contract tests run against each ``store`` fixture."""

    @parametrizes.STORE_EXISTS_SET_BEFORE
    @parametrizes.STORE_EXISTS_KV
    def test_exists(
        self, store: BaseStore, set_before: bool, key: KeyT, value: StoreValueT
    ):
        """exists()/get() reflect whether the key was written beforehand."""
        if set_before:
            store.set(key, value, 1)

        assert store.exists(key) is set_before
        # (None, value)[set_before] picks None when unset, value when set.
        assert store.get(key) == (None, value)[set_before]

    @parametrizes.STORE_TTL_KEY
    @parametrizes.STORE_TTL_TIMEOUT
    def test_ttl(self, store: BaseStore, key: KeyT, timeout: int):
        """ttl() returns the timeout given to set()."""
        store.set(key, 1, timeout)
        assert timeout == store.ttl(key)

    def test_ttl__not_exist(self, store: BaseStore):
        """ttl() on a missing key reports the NOT_EXIST sentinel."""
        assert store.ttl("key") == STORE_TTL_STATE_NOT_EXIST

    def test_ttl__not_ttl(self, store: BaseStore):
        """A hash created without expire() reports the NOT_TTL sentinel."""
        store.hset("name", "key", 1)
        assert store.ttl("name") == STORE_TTL_STATE_NOT_TTL

    @parametrizes.STORE_SET_KEY_TIMEOUT
    def test_set(self, store: BaseStore, key: KeyT, timeout: int):
        """set() stores the value with the requested TTL."""
        store.set(key, 1, timeout)
        assert timeout == store.ttl(key)

    @parametrizes.store_set_raise_parametrize(DataError)
    def test_set__raise(
        self,
        store: BaseStore,
        key: KeyT,
        timeout: Any,
        exc: Type[BaseThrottledError],
        match: str,
    ):
        """set() rejects non-positive / non-integer timeouts."""
        with pytest.raises(exc, match=match):
            store.set(key, 1, timeout)

    @parametrizes.STORE_GET_SET_BEFORE
    @parametrizes.STORE_GET_KV
    def test_get(
        self, store: BaseStore, set_before: bool, key: KeyT, value: StoreValueT
    ):
        """get() round-trips values and returns None for missing keys."""
        if set_before:
            store.set(key, value, 1)
        assert store.get(key) == (None, value)[set_before]

    @parametrizes.STORE_HSET_PARAMETRIZE
    def test_hset(
        self,
        store: BaseStore,
        name: KeyT,
        expect: Dict[KeyT, StoreValueT],
        key: Optional[KeyT],
        value: Optional[StoreValueT],
        mapping: Optional[Dict[KeyT, StoreValueT]],
    ):
        """hset() creates the hash; expire() then attaches a TTL."""
        assert store.exists(name) is False
        assert store.ttl(name) == STORE_TTL_STATE_NOT_EXIST
        store.hset(name, key, value, mapping)
        assert store.exists(name) is True
        # A fresh hash has no TTL until expire() is called.
        assert store.ttl(name) == STORE_TTL_STATE_NOT_TTL
        store.expire(name, 1)
        assert store.ttl(name) == 1
        assert store.hgetall(name) == expect

    @parametrizes.store_hset_raise_parametrize(DataError)
    def test_hset__raise(
        self,
        store: BaseStore,
        params: Dict[str, Any],
        exc: Type[BaseThrottledError],
        match: str,
    ):
        """hset() without key/value data raises DataError."""
        with pytest.raises(exc, match=match):
            store.hset(**params)

    @parametrizes.STORE_HSET_OVERWRITE_PARAMETRIZE
    def test_hset__overwrite(
        self,
        store: BaseStore,
        params_list: List[Dict[str, Any]],
        expected_results: List[Dict[KeyT, StoreValueT]],
    ):
        """Later hset() writes to the same field overwrite earlier ones."""
        key: str = "key"
        for params, expected_result in zip(params_list, expected_results):
            store.hset(key, **params)
            assert store.hgetall(key) == expected_result

    @parametrizes.STORE_HGETALL_PARAMETRIZE
    def test_hgetall(
        self,
        store: BaseStore,
        params_list: List[Dict[str, Any]],
        expected_results: List[Dict[KeyT, StoreValueT]],
    ):
        """hgetall() returns the cumulative contents after each write."""
        for params, expected_result in zip(params_list, expected_results):
            store.hset("name", **params)
            assert store.hgetall("name") == expected_result
116 |
--------------------------------------------------------------------------------
/tests/test_throttled.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 | from typing import Any, Callable, Dict, Type
3 |
4 | import pytest
5 |
6 | from throttled import RateLimiterType, Throttled, per_sec, rate_limiter, store
7 | from throttled.exceptions import BaseThrottledError, DataError, LimitedError
8 | from throttled.types import TimeLikeValueT
9 | from throttled.utils import Timer
10 |
11 |
@pytest.fixture
def decorated_demo() -> Callable:
    """Yield an add() function throttled to one call per minute."""
    throttle = Throttled(
        key="/api/product",
        using=RateLimiterType.FIXED_WINDOW.value,
        quota=rate_limiter.per_min(1),
        store=store.MemoryStore(),
    )

    @throttle
    def demo(left: int, right: int) -> int:
        return left + right

    yield demo
24 |
25 |
class TestThrottled:
    """End-to-end tests for the ``Throttled`` decorator / context manager."""

    def test_demo(self, decorated_demo: Callable) -> None:
        """The first call passes; the second exceeds per_min(1) and raises."""
        assert decorated_demo(1, 2) == 3
        with pytest.raises(LimitedError):
            decorated_demo(2, 3)

    @pytest.mark.parametrize(
        "constructor_kwargs,exc,match",
        [
            [{"timeout": -2}, DataError, "Invalid timeout"],
            [{"timeout": "a"}, DataError, "Invalid timeout"],
            [{"timeout": -1.1}, DataError, "Invalid timeout"],
            [{"timeout": 0}, DataError, "Invalid timeout"],
            [{"timeout": 0.0}, DataError, "Invalid timeout"],
            [{"timeout": -0.0}, DataError, "Invalid timeout"],
        ],
    )
    def test_constructor__raise(
        self,
        constructor_kwargs: Dict[str, Any],
        exc: Type[BaseThrottledError],
        match: str,
    ):
        """Non-positive or non-numeric timeouts are rejected at construction."""
        with pytest.raises(exc, match=match):
            Throttled(**constructor_kwargs)

    def test_get_key(self):
        """Per-call key overrides the constructor key; empty/None fall back."""
        throttle: Throttled = Throttled(key="key")
        assert throttle._get_key() == "key"
        assert throttle._get_key(key="override_key") == "override_key"
        # Falsy overrides fall back to the constructor key.
        assert throttle._get_key(key="") == "key"
        assert throttle._get_key(key=None) == "key"

        # Without a constructor key, a valid key must come from the call site.
        for _throttle in [Throttled(), Throttled(key=""), Throttled(key=None)]:
            with pytest.raises(DataError, match="Invalid key"):
                _throttle(lambda _: None)

            with pytest.raises(DataError, match="Invalid key"):
                _throttle._get_key()

            with pytest.raises(DataError, match="Invalid key"):
                _throttle._get_key(key="")

            assert _throttle._get_key(key="override_key") == "override_key"

    def test_get_timeout(self):
        """Per-call timeout overrides the constructor; -1 means "no wait"."""
        throttle: Throttled = Throttled(timeout=10)
        assert throttle._get_timeout() == 10
        assert throttle._get_timeout(timeout=20) == 20
        assert throttle._get_timeout(timeout=-1) == -1

        with pytest.raises(DataError, match="Invalid timeout"):
            throttle._get_timeout(timeout=0)

        with pytest.raises(DataError, match="Invalid timeout"):
            throttle._get_timeout(timeout=-2)

    def test_limit__timeout(self):
        """limit() waits up to the timeout, or fails fast when waiting can't help."""
        throttle: Throttled = Throttled(timeout=1, quota=per_sec(1))
        assert not throttle.limit("key").limited

        def _callback(
            left: float, right: float, elapsed: TimeLikeValueT, *args, **kwargs
        ):
            # Assert the wall-clock time the limit() call took.
            assert left <= elapsed < right

        # Second call waits ~1s for the per-sec window to free up.
        with Timer(callback=partial(_callback, 1, 2)):
            assert not throttle.limit("key").limited

        # cost=2 exceeds the per-sec(1) quota, so retry_after > timeout and
        # the call fails fast instead of waiting.
        with Timer(callback=partial(_callback, 0, 0.1)):
            assert throttle.limit("key", cost=2).limited

        # A per-call timeout (0.5s) smaller than the ~1s retry_after also
        # makes the call give up immediately.
        with Timer(callback=partial(_callback, 0, 0.1)):
            assert throttle.limit("key", timeout=0.5).limited

    def test_enter(self):
        """Context-manager entry raises LimitedError once the quota is spent."""
        mem_store: store.MemoryStore = store.MemoryStore()
        construct_kwargs: Dict[str, Any] = {
            "key": "key",
            "quota": per_sec(1),
            "store": mem_store,
        }
        throttle: Throttled = Throttled(**construct_kwargs)
        with throttle as rate_limit_result:
            assert not rate_limit_result.limited

        try:
            with throttle:
                pass
        except LimitedError as e:
            # The raised exception carries the full rate limit result.
            assert e.rate_limit_result.limited
            assert e.rate_limit_result.state.remaining == 0
            assert e.rate_limit_result.state.reset_after == 1
            assert e.rate_limit_result.state.retry_after == 1

        # With timeout=1 the second acquisition waits instead of raising.
        with Throttled(**construct_kwargs, timeout=1) as rate_limit_result:
            assert not rate_limit_result.limited
125 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional, Union
2 |
3 | import pytest
4 |
5 | from throttled.types import KeyT
6 | from throttled.utils import format_key, to_bool
7 |
8 |
class TestUtils:
    """Tests for small helpers in ``throttled.utils``."""

    @pytest.mark.parametrize(
        ["value", "expected"],
        [
            (None, None),
            ("", None),
            ("0", False),
            ("F", False),
            ("FALSE", False),
            ("N", False),
            ("NO", False),
            ("1", True),
            ("T", True),
            ("TRUE", True),
            ("Y", True),
            ("YES", True),
            (1, True),
            (0, False),
        ],
    )
    def test_to_bool(self, value: Any, expected: Optional[bool]):
        """to_bool() maps common truthy/falsy spellings; None/"" give None."""
        assert to_bool(value) == expected

    @pytest.mark.parametrize(
        ["key", "expect"],
        [
            (b"key", "key"),
            (b"key\x00", "key\x00"),
            ("key", "key"),
            ("key\x00", "key\x00"),
            (b"\x00", "\x00"),
            ("", ""),
        ],
    )
    def test_format_key(self, key: Union[bytes, str], expect: KeyT):
        """format_key() decodes bytes keys to str and passes str through."""
        assert format_key(key) == expect
45 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhuoZhuoCrayon/throttled-py/b621009701f911abe59e37806753fe24a4b47c94/tests/utils.py
--------------------------------------------------------------------------------
/throttled/__init__.py:
--------------------------------------------------------------------------------
1 | from . import asyncio, rate_limiter
2 | from .constants import RateLimiterType
3 | from .rate_limiter import (
4 | BaseRateLimiter,
5 | Quota,
6 | Rate,
7 | RateLimiterMeta,
8 | RateLimiterRegistry,
9 | RateLimitResult,
10 | RateLimitState,
11 | per_day,
12 | per_duration,
13 | per_hour,
14 | per_min,
15 | per_sec,
16 | per_week,
17 | )
18 | from .store import (
19 | BaseAtomicAction,
20 | BaseStore,
21 | BaseStoreBackend,
22 | MemoryStore,
23 | MemoryStoreBackend,
24 | RedisStore,
25 | RedisStoreBackend,
26 | )
27 | from .throttled import Throttled
28 |
# Package version string.
__version__ = "2.2.3"
# Version as an integer tuple, e.g. (2, 2, 3), for programmatic comparison.
VERSION = tuple(map(int, __version__.split(".")))


# Names exported via ``from throttled import *``. Submodule names listed
# here ("exceptions", "constants", "types", "utils") are presumably bound
# as package attributes by the imports above or transitively — TODO confirm.
__all__ = [
    "__version__",
    "VERSION",
    # public module
    "exceptions",
    "constants",
    "types",
    "utils",
    "asyncio",
    # rate_limiter
    "rate_limiter",
    "per_sec",
    "per_min",
    "per_hour",
    "per_day",
    "per_week",
    "per_duration",
    "Rate",
    "Quota",
    "RateLimitState",
    "RateLimitResult",
    "RateLimiterRegistry",
    "RateLimiterMeta",
    "BaseRateLimiter",
    # store
    "BaseStoreBackend",
    "BaseAtomicAction",
    "BaseStore",
    "MemoryStoreBackend",
    "MemoryStore",
    "RedisStoreBackend",
    "RedisStore",
    # throttled
    "Throttled",
    # constants
    "RateLimiterType",
]
70 |
--------------------------------------------------------------------------------
/throttled/asyncio/__init__.py:
--------------------------------------------------------------------------------
1 | from .. import constants, exceptions, types, utils
2 | from ..constants import RateLimiterType
3 | from .rate_limiter import (
4 | BaseRateLimiter,
5 | Quota,
6 | Rate,
7 | RateLimiterMeta,
8 | RateLimiterRegistry,
9 | RateLimitResult,
10 | RateLimitState,
11 | per_day,
12 | per_duration,
13 | per_hour,
14 | per_min,
15 | per_sec,
16 | per_week,
17 | )
18 | from .store import (
19 | BaseAtomicAction,
20 | BaseStore,
21 | BaseStoreBackend,
22 | MemoryStore,
23 | MemoryStoreBackend,
24 | RedisStore,
25 | RedisStoreBackend,
26 | )
27 | from .throttled import Throttled
28 |
# Keep the async package version in lockstep with the sync package.
__version__ = "2.2.3"
# Version as an integer tuple, e.g. (2, 2, 3), for programmatic comparison.
VERSION = tuple(map(int, __version__.split(".")))

# Names exported via ``from throttled.asyncio import *``; mirrors the sync
# package's public API.
__all__ = [
    "__version__",
    "VERSION",
    # public module
    "exceptions",
    "constants",
    "types",
    "utils",
    # rate_limiter
    "rate_limiter",
    "per_sec",
    "per_min",
    "per_hour",
    "per_day",
    "per_week",
    "per_duration",
    "Rate",
    "Quota",
    "RateLimitState",
    "RateLimitResult",
    "RateLimiterRegistry",
    "RateLimiterMeta",
    "BaseRateLimiter",
    # store
    "BaseStoreBackend",
    "BaseAtomicAction",
    "BaseStore",
    "MemoryStoreBackend",
    "MemoryStore",
    "RedisStoreBackend",
    "RedisStore",
    # throttled
    "Throttled",
    # constants
    "RateLimiterType",
]
68 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/__init__.py:
--------------------------------------------------------------------------------
1 | from ...rate_limiter.base import (
2 | Quota,
3 | Rate,
4 | RateLimitResult,
5 | RateLimitState,
6 | per_day,
7 | per_duration,
8 | per_hour,
9 | per_min,
10 | per_sec,
11 | per_week,
12 | )
13 | from .base import BaseRateLimiter, RateLimiterMeta, RateLimiterRegistry
14 |
15 | # Trigger to register Async RateLimiter
16 | from .fixed_window import FixedWindowRateLimiter
17 | from .gcra import GCRARateLimiterCoreMixin
18 | from .leaking_bucket import LeakingBucketRateLimiter
19 | from .sliding_window import SlidingWindowRateLimiter
20 | from .token_bucket import TokenBucketRateLimiter
21 |
# Public API of the async rate_limiter package.
# NOTE(review): unlike the sync package (which exports GCRARateLimiter), this
# exports GCRARateLimiterCoreMixin — confirm this is intended and not a typo.
__all__ = [
    "per_sec",
    "per_min",
    "per_hour",
    "per_day",
    "per_week",
    "per_duration",
    "Rate",
    "Quota",
    "RateLimitState",
    "RateLimitResult",
    "RateLimiterRegistry",
    "RateLimiterMeta",
    "BaseRateLimiter",
    "FixedWindowRateLimiter",
    "LeakingBucketRateLimiter",
    "SlidingWindowRateLimiter",
    "TokenBucketRateLimiter",
    "GCRARateLimiterCoreMixin",
]
42 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/base.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from typing import Type
3 |
4 | from ... import rate_limiter
5 |
6 |
class RateLimiterRegistry(rate_limiter.RateLimiterRegistry):
    """Registry for Async RateLimiter classes.

    Overrides the namespace so async limiter registrations are kept apart
    from the sync ones handled by the base registry.
    """

    # Namespace key under which async limiters are registered.
    _NAMESPACE: str = "async"
11 |
12 |
class RateLimiterMeta(rate_limiter.RateLimiterMeta):
    """Metaclass for Async RateLimiter classes.

    Points the registration machinery at the async RateLimiterRegistry so
    subclasses of the async BaseRateLimiter register into the async namespace.
    """

    # Registry used when async limiter subclasses are created.
    _REGISTRY_CLASS: Type[RateLimiterRegistry] = RateLimiterRegistry
17 |
18 |
class BaseRateLimiter(rate_limiter.BaseRateLimiterMixin, metaclass=RateLimiterMeta):
    """Base class for Async RateLimiter.

    Subclasses implement the coroutine hooks ``_limit`` / ``_peek``; the public
    ``limit`` / ``peek`` coroutines simply delegate to them.
    """

    @abc.abstractmethod
    async def _limit(self, key: str, cost: int) -> rate_limiter.RateLimitResult:
        # Consume ``cost`` against the quota for ``key`` and report the outcome.
        raise NotImplementedError

    @abc.abstractmethod
    async def _peek(self, key: str) -> rate_limiter.RateLimitState:
        # Report the current state for ``key`` without consuming quota.
        raise NotImplementedError

    async def limit(self, key: str, cost: int = 1) -> rate_limiter.RateLimitResult:
        """Attempt to consume ``cost`` units of quota for ``key``.
        :param key: The identifier being rate limited.
        :param cost: Number of units to consume (default 1).
        :return: The result after executing the RateLimiter for the given key.
        """
        return await self._limit(key, cost)

    async def peek(self, key: str) -> rate_limiter.RateLimitState:
        """Return the current rate limit state for ``key``.
        :param key: The identifier being rate limited.
        :return: The current RateLimitState for the given key.
        """
        return await self._peek(key)
35 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/fixed_window.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Sequence, Tuple, Type
2 |
3 | from ...constants import ATOMIC_ACTION_TYPE_LIMIT
4 | from ...rate_limiter.fixed_window import (
5 | FixedWindowRateLimiterCoreMixin,
6 | MemoryLimitAtomicActionCoreMixin,
7 | RedisLimitAtomicActionCoreMixin,
8 | )
9 | from ...types import KeyT, StoreValueT
10 | from ..store import BaseAtomicAction
11 | from . import BaseRateLimiter, RateLimitResult, RateLimitState
12 |
13 |
class RedisLimitAtomicAction(RedisLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Redis-based implementation of AtomicAction for Async FixedWindowRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Increment the fixed-window counter for ``keys[0]``.
        :param keys: ``[period_key]`` — key of the current window.
        :param args: ``[period, limit, cost]``.
        :return: Tuple of (limited flag — 1 if over limit, 0 otherwise; current count).
        """
        period, limit, cost = args
        current: int = await self._backend.get_client().incrby(keys[0], cost)
        # Counter was just created by this INCRBY: start the window's expiry.
        if current == cost:
            await self._backend.get_client().expire(keys[0], period)
        # A zero-cost call is a probe and is never reported as limited.
        return [0, 1][current > limit and cost != 0], current
25 |
26 |
class MemoryLimitAtomicAction(MemoryLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Memory-based implementation of AtomicAction for Async FixedWindowRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Run the shared sync ``_do`` under the backend's asyncio lock.
        :param keys: ``[period_key]``.
        :param args: ``[period, limit, cost]``.
        :return: Tuple of (limited flag, current count).
        """
        # The lock serializes coroutines; the algorithm lives in the core mixin.
        async with self._backend.lock:
            return self._do(self._backend, keys, args)
35 |
36 |
class FixedWindowRateLimiter(FixedWindowRateLimiterCoreMixin, BaseRateLimiter):
    """Async rate limiter using the fixed window algorithm.

    Requests are counted inside a fixed time window; once the counter exceeds
    the quota, further requests are rejected until the window rolls over.
    """

    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[BaseAtomicAction]] = [
        RedisLimitAtomicAction,
        MemoryLimitAtomicAction,
    ]

    async def _limit(self, key: str, cost: int = 1) -> RateLimitResult:
        period_key, period, limit, now = self._prepare(key)
        action = self._atomic_actions[ATOMIC_ACTION_TYPE_LIMIT]
        limited, current = await action.do([period_key], [period, limit, cost])

        # Seconds until the current window ends.
        reset_after: float = period - (now % period)
        retry_after = reset_after if limited else 0
        remaining = max(0, limit - current)
        return RateLimitResult(
            limited=bool(limited),
            state_values=(limit, remaining, reset_after, retry_after),
        )

    async def _peek(self, key: str) -> RateLimitState:
        period_key, period, limit, now = self._prepare(key)
        # Read the raw counter without consuming quota; a missing key counts as 0.
        used = int(await self._store.get(period_key) or 0)
        return RateLimitState(
            limit=limit,
            remaining=max(0, limit - used),
            reset_after=period - (now % period),
        )
70 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/gcra.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Sequence, Tuple, Type
2 |
3 | from ...constants import ATOMIC_ACTION_TYPE_LIMIT, ATOMIC_ACTION_TYPE_PEEK
4 | from ...rate_limiter.gcra import (
5 | GCRARateLimiterCoreMixin,
6 | MemoryLimitAtomicActionCoreMixin,
7 | MemoryPeekAtomicActionCoreMixin,
8 | RedisLimitAtomicActionCoreMixin,
9 | RedisPeekAtomicActionCoreMixin,
10 | )
11 | from ...types import AtomicActionP, KeyT, StoreValueT
12 | from ..store import BaseAtomicAction
13 | from . import BaseRateLimiter, RateLimitResult, RateLimitState
14 |
15 |
class RedisLimitAtomicAction(RedisLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Redis-based implementation of AtomicAction for Async GCRARateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int, float, float]:
        """Execute the GCRA script prepared by the core mixin.
        :param keys: ``[formatted_key]``.
        :param args: ``[emission_interval, capacity, cost]`` (cost absent on peek).
        :return: Tuple of (limited, remaining, reset_after, retry_after).
        """
        limited, remaining, reset_after, retry_after = await self._script(keys, args)
        # Normalize the script's reset/retry values to float for callers.
        return limited, remaining, float(reset_after), float(retry_after)
24 |
25 |
class RedisPeekAtomicAction(RedisPeekAtomicActionCoreMixin, RedisLimitAtomicAction):
    """
    Redis-based implementation of AtomicAction for Async GCRARateLimiter's peek
    operation. Inherits ``do`` from ``RedisLimitAtomicAction``.
    """
30 |
31 |
class MemoryLimitAtomicAction(MemoryLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Memory-based implementation of AtomicAction for Async GCRARateLimiter."""
    # (Fixed: the original docstring said "LeakingBucketRateLimiter", a copy-paste
    # slip — this class lives in gcra.py and uses the GCRA core mixin.)

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int, float, float]:
        """Run the shared sync ``_do`` under the backend's asyncio lock.
        :return: Tuple of (limited, remaining, reset_after, retry_after).
        """
        async with self._backend.lock:
            return self._do(self._backend, keys, args)
40 |
41 |
class MemoryPeekAtomicAction(MemoryPeekAtomicActionCoreMixin, MemoryLimitAtomicAction):
    """
    Memory-based implementation of AtomicAction for Async GCRARateLimiter's peek
    operation. Inherits ``do`` from ``MemoryLimitAtomicAction``.
    """
46 |
47 |
class GCRARateLimiter(GCRARateLimiterCoreMixin, BaseRateLimiter):
    """Async rate limiter implementing GCRA (Generic Cell Rate Algorithm)."""

    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[AtomicActionP]] = [
        RedisPeekAtomicAction,
        RedisLimitAtomicAction,
        MemoryLimitAtomicAction,
        MemoryPeekAtomicAction,
    ]

    async def _limit(self, key: str, cost: int = 1) -> RateLimitResult:
        formatted_key, emission_interval, capacity = self._prepare(key)
        action = self._atomic_actions[ATOMIC_ACTION_TYPE_LIMIT]
        limited, remaining, reset_after, retry_after = await action.do(
            [formatted_key], [emission_interval, capacity, cost]
        )
        return RateLimitResult(
            limited=bool(limited),
            state_values=(capacity, remaining, reset_after, retry_after),
        )

    async def _peek(self, key: str) -> RateLimitState:
        formatted_key, emission_interval, capacity = self._prepare(key)
        # Peek uses the same argument layout, minus the cost.
        action = self._atomic_actions[ATOMIC_ACTION_TYPE_PEEK]
        limited, remaining, reset_after, retry_after = await action.do(
            [formatted_key], [emission_interval, capacity]
        )
        return RateLimitState(
            limit=capacity,
            remaining=remaining,
            reset_after=reset_after,
            retry_after=retry_after,
        )
80 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/leaking_bucket.py:
--------------------------------------------------------------------------------
1 | import math
2 | from typing import List, Optional, Sequence, Tuple, Type
3 |
4 | from ...constants import ATOMIC_ACTION_TYPE_LIMIT
5 | from ...rate_limiter.leaking_bucket import (
6 | LeakingBucketRateLimiterCoreMixin,
7 | MemoryLimitAtomicActionCoreMixin,
8 | RedisLimitAtomicActionCoreMixin,
9 | )
10 | from ...types import KeyT, StoreDictValueT, StoreValueT
11 | from ...utils import now_sec
12 | from ..store import BaseAtomicAction
13 | from . import BaseRateLimiter, RateLimitResult, RateLimitState
14 |
15 |
class RedisLimitAtomicAction(RedisLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Redis-based implementation of AtomicAction for Async LeakingBucketRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Execute the leaking-bucket script prepared by the core mixin.
        :param keys: ``[formatted_key]``.
        :param args: ``[rate, capacity, cost, now]``.
        :return: Tuple of (limited flag, tokens in the bucket).
        """
        return await self._script(keys, args)
23 |
24 |
class MemoryLimitAtomicAction(MemoryLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Memory-based implementation of AtomicAction for Async LeakingBucketRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Run the shared sync ``_do`` under the backend's asyncio lock.
        :return: Tuple of (limited flag, tokens in the bucket).
        """
        async with self._backend.lock:
            return self._do(self._backend, keys, args)
33 |
34 |
class LeakingBucketRateLimiter(LeakingBucketRateLimiterCoreMixin, BaseRateLimiter):
    """Async rate limiter using the leaking bucket algorithm."""

    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[BaseAtomicAction]] = [
        RedisLimitAtomicAction,
        MemoryLimitAtomicAction,
    ]

    async def _limit(self, key: str, cost: int = 1) -> RateLimitResult:
        formatted_key, rate, capacity = self._prepare(key)
        action = self._atomic_actions[ATOMIC_ACTION_TYPE_LIMIT]
        limited, tokens = await action.do(
            [formatted_key], [rate, capacity, cost, now_sec()]
        )
        return self._to_result(limited, cost, tokens, capacity)

    async def _peek(self, key: str) -> RateLimitState:
        now: int = now_sec()
        formatted_key, rate, capacity = self._prepare(key)

        # Reconstruct the bucket level from the stored snapshot.
        bucket: StoreDictValueT = await self._store.hgetall(formatted_key)
        stored_tokens: int = bucket.get("tokens", 0)
        refreshed_at: int = bucket.get("last_refreshed", now)

        # The bucket drains at ``rate`` per second since the last refresh.
        elapsed: int = max(0, now - refreshed_at)
        tokens: int = max(0, stored_tokens - math.floor(elapsed * rate))

        return RateLimitState(
            limit=capacity,
            remaining=capacity - tokens,
            reset_after=math.ceil(tokens / rate),
        )
66 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/sliding_window.py:
--------------------------------------------------------------------------------
1 | import math
2 | from typing import List, Optional, Sequence, Tuple, Type
3 |
4 | from ...constants import ATOMIC_ACTION_TYPE_LIMIT
5 | from ...rate_limiter.sliding_window import (
6 | MemoryLimitAtomicActionCoreMixin,
7 | RedisLimitAtomicActionCoreMixin,
8 | SlidingWindowRateLimiterCoreMixin,
9 | )
10 | from ...types import AtomicActionP, KeyT, StoreValueT
11 | from ...utils import now_ms
12 | from ..store import BaseAtomicAction
13 | from . import BaseRateLimiter, RateLimitResult, RateLimitState
14 |
15 |
class RedisLimitAtomicAction(RedisLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Redis-based implementation of AtomicAction for Async SlidingWindowRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int, float]:
        """Execute the sliding-window script prepared by the core mixin.
        :param keys: ``[current_key, previous_key]``.
        :param args: ``[period, limit, cost, now_ms]``.
        :return: Tuple of (limited flag, used count, retry_after).
        """
        return await self._script(keys, args)
23 |
24 |
class MemoryLimitAtomicAction(MemoryLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Memory-based implementation of AtomicAction for Async SlidingWindowRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int, float]:
        """Run the shared sync ``_do`` under the backend's asyncio lock.
        :return: Tuple of (limited flag, used count, retry_after).
        """
        async with self._backend.lock:
            return self._do(self._backend, keys, args)
33 |
34 |
class SlidingWindowRateLimiter(SlidingWindowRateLimiterCoreMixin, BaseRateLimiter):
    """Async rate limiter using the sliding window algorithm."""

    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[AtomicActionP]] = [
        RedisLimitAtomicAction,
        MemoryLimitAtomicAction,
    ]

    async def _limit(self, key: str, cost: int = 1) -> RateLimitResult:
        current_key, previous_key, period, limit = self._prepare(key)
        action = self._atomic_actions[ATOMIC_ACTION_TYPE_LIMIT]
        limited, used, retry_after = await action.do(
            [current_key, previous_key], [period, limit, cost, now_ms()]
        )
        remaining = max(0, limit - used)
        return RateLimitResult(
            limited=bool(limited),
            state_values=(limit, remaining, period, retry_after),
        )

    async def _peek(self, key: str) -> RateLimitState:
        current_key, previous_key, period, limit = self._prepare(key)
        period_ms: int = period * 1000
        # Fraction of the current window that has already elapsed.
        elapsed_ratio: float = (now_ms() % period_ms) / period_ms

        # Weight the previous window's count by the portion still in scope,
        # then add the current window's count.
        previous_count = await self._store.get(previous_key) or 0
        weighted_previous: int = math.floor((1 - elapsed_ratio) * previous_count)
        current_count = await self._store.get(current_key) or 0
        used: int = weighted_previous + current_count

        return RateLimitState(
            limit=limit, remaining=max(0, limit - used), reset_after=period
        )
66 |
--------------------------------------------------------------------------------
/throttled/asyncio/rate_limiter/token_bucket.py:
--------------------------------------------------------------------------------
1 | import math
2 | from typing import List, Optional, Sequence, Tuple, Type
3 |
4 | from ...constants import ATOMIC_ACTION_TYPE_LIMIT
5 | from ...rate_limiter.token_bucket import (
6 | MemoryLimitAtomicActionCoreMixin,
7 | RedisLimitAtomicActionCoreMixin,
8 | TokenBucketRateLimiterCoreMixin,
9 | )
10 | from ...types import AtomicActionP, KeyT, StoreDictValueT, StoreValueT
11 | from ...utils import now_sec
12 | from ..store import BaseAtomicAction
13 | from . import BaseRateLimiter, RateLimitResult, RateLimitState
14 |
15 |
class RedisLimitAtomicAction(RedisLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Redis-based implementation of AtomicAction for Async TokenBucketRateLimiter."""

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Execute the token-bucket script prepared by the core mixin.
        :param keys: ``[formatted_key]``.
        :param args: ``[rate, capacity, cost, now]``.
        :return: Tuple of (limited flag, tokens left in the bucket).
        """
        return await self._script(keys, args)
23 |
24 |
class MemoryLimitAtomicAction(MemoryLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Memory-based implementation of AtomicAction for Async TokenBucketRateLimiter."""
    # (Fixed: the original docstring said "LeakingBucketRateLimiter", a copy-paste
    # slip — this class lives in token_bucket.py and uses the token bucket mixin.)

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Run the shared sync ``_do`` under the backend's asyncio lock.
        :return: Tuple of (limited flag, tokens left in the bucket).
        """
        async with self._backend.lock:
            return self._do(self._backend, keys, args)
33 |
34 |
class TokenBucketRateLimiter(TokenBucketRateLimiterCoreMixin, BaseRateLimiter):
    """Concrete implementation of BaseRateLimiter using token bucket as algorithm."""
    # (Fixed: the original docstring said "leaking bucket" — this class uses the
    # token bucket core mixin.)

    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[AtomicActionP]] = [
        RedisLimitAtomicAction,
        MemoryLimitAtomicAction,
    ]

    async def _limit(self, key: str, cost: int = 1) -> RateLimitResult:
        """Try to take ``cost`` tokens from the bucket for ``key``."""
        formatted_key, rate, capacity = self._prepare(key)
        limited, tokens = await self._atomic_actions[ATOMIC_ACTION_TYPE_LIMIT].do(
            [formatted_key], [rate, capacity, cost, now_sec()]
        )
        return self._to_result(limited, cost, tokens, capacity)

    async def _peek(self, key: str) -> RateLimitState:
        """Compute the current bucket state without consuming tokens."""
        now: int = now_sec()
        formatted_key, rate, capacity = self._prepare(key)

        bucket: StoreDictValueT = await self._store.hgetall(formatted_key)
        # A missing bucket is treated as full (``capacity`` tokens available).
        last_tokens: int = bucket.get("tokens", capacity)
        last_refreshed: int = bucket.get("last_refreshed", now)

        # Refill tokens for the elapsed time, capped at capacity.
        time_elapsed: int = max(0, now - last_refreshed)
        tokens: int = min(capacity, last_tokens + (math.floor(time_elapsed * rate)))
        # Seconds until the bucket is completely refilled.
        reset_after: int = math.ceil((capacity - tokens) / rate)

        return RateLimitState(limit=capacity, remaining=tokens, reset_after=reset_after)
63 |
--------------------------------------------------------------------------------
/throttled/asyncio/store/__init__.py:
--------------------------------------------------------------------------------
1 | from ...store import BaseStoreBackend
2 | from .base import BaseAtomicAction, BaseStore
3 | from .memory import MemoryStore, MemoryStoreBackend
4 | from .redis import RedisStore, RedisStoreBackend
5 |
# Public API of the async store package.
__all__ = [
    "BaseStoreBackend",
    "BaseAtomicAction",
    "BaseStore",
    "MemoryStoreBackend",
    "MemoryStore",
    "RedisStoreBackend",
    "RedisStore",
]
15 |
--------------------------------------------------------------------------------
/throttled/asyncio/store/base.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from typing import Any, Optional, Sequence, Type
3 |
4 | from ...store.base import BaseAtomicActionMixin, BaseStoreMixin
5 | from ...types import AtomicActionP, KeyT, StoreDictValueT, StoreValueT
6 |
7 |
class BaseAtomicAction(BaseAtomicActionMixin, abc.ABC):
    """Abstract class for all async atomic actions performed by a store backend.

    An atomic action bundles one backend operation (e.g. a Redis script or a
    lock-guarded memory mutation) so rate limiter math executes atomically.
    """

    @abc.abstractmethod
    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Any:
        """Execute the AtomicAction on the specified keys with optional arguments.
        :param keys: A sequence of keys.
        :param args: Optional sequence of arguments.
        :return: Any: The result of the AtomicAction.
        """
        raise NotImplementedError
21 |
22 |
class BaseStore(BaseStoreMixin, abc.ABC):
    """Abstract class for all async stores.

    Mirrors the sync store interface with coroutine methods so implementations
    can await their backend without blocking the event loop.
    """

    @abc.abstractmethod
    async def exists(self, key: KeyT) -> bool:
        """Check if the specified key exists.
        :param key: The key to check.
        :return: True if the specified key exists, False otherwise.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def ttl(self, key: KeyT) -> int:
        """Returns the number of seconds until the specified key will expire.
        :param key: The key to check.
        :return: Remaining TTL in seconds; implementations may use negative
            sentinels (see ``constants.STORE_TTL_STATE_*``) for no-TTL / missing.
        :raise: DataError
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def expire(self, key: KeyT, timeout: int) -> None:
        """Set a timeout on the specified key.
        :param key: The key to expire.
        :param timeout: The timeout in seconds.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        """Set a value for the specified key with specified timeout.
        :param key: The key to set.
        :param value: The value to set.
        :param timeout: The timeout in seconds.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def get(self, key: KeyT) -> Optional[StoreValueT]:
        """Get a value for the specified key.
        :param key: The key for which to get a value.
        :return: The value for the specified key, or None if it does not exist.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        """Set a value for the specified key in the specified hash.
        :param name: The name of the hash.
        :param key: The key in the hash.
        :param value: The value to set.
        :param mapping: A dictionary of key-value pairs to set.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def hgetall(self, name: KeyT) -> StoreDictValueT:
        """Get all key-value pairs of the specified hash.
        :param name: The name of the hash.
        :return: A dictionary of the hash's key-value pairs.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def make_atomic(self, action_cls: Type[AtomicActionP]) -> AtomicActionP:
        """Create an instance of an AtomicAction for this store.
        :param action_cls: The class of the AtomicAction.
        :return: The AtomicAction instance.
        """
        raise NotImplementedError
90 |
--------------------------------------------------------------------------------
/throttled/asyncio/store/memory.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Any, Dict, Optional, Type
3 |
4 | from ... import constants, store
5 | from ...types import AtomicActionP, KeyT, LockP, StoreDictValueT, StoreValueT
6 | from . import BaseStore
7 |
8 |
class MemoryStoreBackend(store.MemoryStoreBackend):
    """Backend for Async MemoryStore."""

    def _get_lock(self) -> LockP:
        # Override the base backend's lock factory with an asyncio.Lock so
        # coroutines can await it without blocking the event loop.
        return asyncio.Lock()
14 |
15 |
class MemoryStore(BaseStore):
    """Concrete implementation of BaseStore using Memory as backend."""

    TYPE: str = constants.StoreType.MEMORY.value

    # Backend class instantiated per store; subclasses may override.
    _BACKEND_CLASS: Type[MemoryStoreBackend] = MemoryStoreBackend

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ):
        """
        :param server: Optional server address, forwarded to the backend.
        :param options: Optional backend configuration, forwarded to the backend.
        """
        super().__init__(server, options)
        self._backend: MemoryStoreBackend = self._BACKEND_CLASS(server, options)

    async def exists(self, key: KeyT) -> bool:
        # NOTE(review): exists/ttl/expire delegate without taking the backend
        # lock, unlike set/get/hset/hgetall — confirm this is intentional.
        return self._backend.exists(key)

    async def ttl(self, key: KeyT) -> int:
        return self._backend.ttl(key)

    async def expire(self, key: KeyT, timeout: int) -> None:
        self._validate_timeout(timeout)
        self._backend.expire(key, timeout)

    async def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        self._validate_timeout(timeout)
        # Mutations are serialized through the backend's asyncio lock.
        async with self._backend.lock:
            self._backend.set(key, value, timeout)

    async def get(self, key: KeyT) -> Optional[StoreValueT]:
        async with self._backend.lock:
            return self._backend.get(key)

    async def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        async with self._backend.lock:
            self._backend.hset(name, key, value, mapping)

    async def hgetall(self, name: KeyT) -> StoreDictValueT:
        async with self._backend.lock:
            return self._backend.hgetall(name)

    def make_atomic(self, action_cls: Type[AtomicActionP]) -> AtomicActionP:
        """Instantiate ``action_cls`` bound to this store's backend."""
        return action_cls(backend=self._backend)
64 |
--------------------------------------------------------------------------------
/throttled/asyncio/store/redis.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Optional, Type
2 |
3 | from ... import constants, store, utils
4 | from ...exceptions import DataError
5 | from ...types import AtomicActionP, KeyT, StoreDictValueT, StoreValueT
6 | from . import BaseStore
7 |
8 |
class RedisStoreBackend(store.RedisStoreBackend):
    """Backend for Async RedisStore."""

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ):
        """
        :param server: Redis server address, forwarded to the base backend.
        :param options: Connection options; asyncio-appropriate defaults are
            filled in for any option the caller did not set.
        """
        options = options or {}
        # Set default options for asyncio Redis.
        options.setdefault("REUSE_CONNECTION", False)
        options.setdefault("CONNECTION_POOL_CLASS", "redis.asyncio.ConnectionPool")
        options.setdefault("REDIS_CLIENT_CLASS", "redis.asyncio.Redis")
        options.setdefault("PARSER_CLASS", "redis.asyncio.connection.DefaultParser")

        super().__init__(server, options)
23 |
24 |
class RedisStore(BaseStore):
    """Concrete implementation of BaseStore using Redis as backend."""

    TYPE: str = constants.StoreType.REDIS.value

    # Backend class instantiated per store; subclasses may override.
    _BACKEND_CLASS: Type[RedisStoreBackend] = RedisStoreBackend

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ):
        """
        :param server: Redis server address, forwarded to the backend.
        :param options: Optional connection options, forwarded to the backend.
        """
        super().__init__(server, options)
        self._backend: RedisStoreBackend = self._BACKEND_CLASS(server, options)

    async def exists(self, key: KeyT) -> bool:
        """Return True if ``key`` exists in Redis."""
        return bool(await self._backend.get_client().exists(key))

    async def ttl(self, key: KeyT) -> int:
        """Return the remaining TTL of ``key`` in seconds (Redis TTL semantics)."""
        return int(await self._backend.get_client().ttl(key))

    async def expire(self, key: KeyT, timeout: int) -> None:
        """Set ``key`` to expire after ``timeout`` seconds (validated first)."""
        self._validate_timeout(timeout)
        await self._backend.get_client().expire(key, timeout)

    async def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        """Set ``key`` to ``value`` with an expiry of ``timeout`` seconds."""
        self._validate_timeout(timeout)
        await self._backend.get_client().set(key, value, ex=timeout)

    async def get(self, key: KeyT) -> Optional[StoreValueT]:
        """Get ``key``'s value normalized via ``utils.format_value``; None if absent."""
        value: Optional[StoreValueT] = await self._backend.get_client().get(key)
        if value is None:
            return None

        return utils.format_value(value)

    async def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        """Set hash field(s) on ``name``.
        :raise: DataError if neither a key nor a non-empty mapping is given.
        """
        if key is None and not mapping:
            raise DataError("hset must with key value pairs")
        await self._backend.get_client().hset(name, key, value, mapping)

    async def hgetall(self, name: KeyT) -> StoreDictValueT:
        """Return all fields of hash ``name``, normalized via ``utils.format_kv``."""
        return utils.format_kv(await self._backend.get_client().hgetall(name))

    def make_atomic(self, action_cls: Type[AtomicActionP]) -> AtomicActionP:
        """Instantiate ``action_cls`` bound to this store's backend."""
        return action_cls(backend=self._backend)
75 |
--------------------------------------------------------------------------------
/throttled/constants.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import List
3 |
4 | from .types import AtomicActionTypeT, RateLimiterTypeT
5 |
6 |
class StoreType(Enum):
    """Enumeration of supported storage backend types."""

    REDIS = "redis"
    MEMORY = "memory"

    @classmethod
    def choice(cls) -> List[str]:
        """Return the backend type values, in declaration order."""
        return [member.value for member in cls]
14 |
15 |
# Sentinel TTL values returned by stores; names (and values) appear to mirror
# Redis's TTL convention: -1 -> key exists without expiry, -2 -> key missing.
STORE_TTL_STATE_NOT_TTL: int = -1
STORE_TTL_STATE_NOT_EXIST: int = -2

# Enumeration for types of AtomicActions
ATOMIC_ACTION_TYPE_LIMIT: AtomicActionTypeT = "limit"
ATOMIC_ACTION_TYPE_PEEK: AtomicActionTypeT = "peek"
22 |
23 |
class RateLimiterType(Enum):
    """Enumeration for types of RateLimiter (algorithm identifiers)."""

    FIXED_WINDOW = "fixed_window"
    SLIDING_WINDOW = "sliding_window"
    LEAKING_BUCKET = "leaking_bucket"
    TOKEN_BUCKET = "token_bucket"
    GCRA = "gcra"

    @classmethod
    def choice(cls) -> List[RateLimiterTypeT]:
        """Return all algorithm identifiers, in declaration order."""
        return [member.value for member in cls]
42 |
--------------------------------------------------------------------------------
/throttled/exceptions.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING, Optional
2 |
3 | if TYPE_CHECKING:
4 | from throttled.rate_limiter import RateLimitResult
5 |
6 |
class BaseThrottledError(Exception):
    """Base class for all throttled-related exceptions."""
11 |
12 |
class SetUpError(BaseThrottledError):
    """Exception raised when there is an error during setup."""
17 |
18 |
class DataError(BaseThrottledError):
    """Exception raised for errors related to data integrity or format.

    Thrown when the parameter is invalid, such as: Invalid key: None,
    must be a non-empty key.
    """
27 |
28 |
class StoreUnavailableError(BaseThrottledError):
    """Exception raised when the store (e.g., Redis) is unavailable."""
33 |
34 |
class LimitedError(BaseThrottledError):
    """Exception raised when a rate limit is exceeded.

    When a request is throttled, an exception is thrown, such as:
    Rate limit exceeded: remaining=0, reset_after=60, retry_after=60.
    """

    def __init__(self, rate_limit_result: Optional["RateLimitResult"] = None):
        #: The result after executing the RateLimiter for the given key.
        self.rate_limit_result: Optional["RateLimitResult"] = rate_limit_result
        # Build a detailed message only when a result with state is available.
        state = rate_limit_result.state if rate_limit_result else None
        if state:
            message: str = (
                "Rate limit exceeded: remaining={remaining}, "
                "reset_after={reset_after}, retry_after={retry_after}."
            ).format(
                remaining=state.remaining,
                reset_after=state.reset_after,
                retry_after=state.retry_after,
            )
        else:
            message: str = "Rate limit exceeded."
        super().__init__(message)
57 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import (
2 | BaseRateLimiter,
3 | BaseRateLimiterMixin,
4 | Quota,
5 | Rate,
6 | RateLimiterMeta,
7 | RateLimiterRegistry,
8 | RateLimitResult,
9 | RateLimitState,
10 | per_day,
11 | per_duration,
12 | per_hour,
13 | per_min,
14 | per_sec,
15 | per_week,
16 | )
17 |
18 | # Trigger to register RateLimiter
19 | from .fixed_window import FixedWindowRateLimiter
20 | from .gcra import GCRARateLimiter
21 | from .leaking_bucket import LeakingBucketRateLimiter
22 | from .sliding_window import SlidingWindowRateLimiter
23 | from .token_bucket import TokenBucketRateLimiter
24 |
# Public API of the rate_limiter package.
__all__ = [
    "per_sec",
    "per_min",
    "per_hour",
    "per_day",
    "per_week",
    "per_duration",
    "Rate",
    "Quota",
    "RateLimitState",
    "RateLimitResult",
    "RateLimiterRegistry",
    "RateLimiterMeta",
    "BaseRateLimiter",
    "BaseRateLimiterMixin",
    "FixedWindowRateLimiter",
    "SlidingWindowRateLimiter",
    "TokenBucketRateLimiter",
    "LeakingBucketRateLimiter",
    "GCRARateLimiter",
]
46 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/fixed_window.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, Type
2 |
3 | from ..constants import ATOMIC_ACTION_TYPE_LIMIT, RateLimiterType, StoreType
4 | from ..store import BaseAtomicAction
5 | from ..types import AtomicActionP, AtomicActionTypeT, KeyT, RateLimiterTypeT, StoreValueT
6 | from ..utils import now_sec
7 | from . import BaseRateLimiter, BaseRateLimiterMixin, RateLimitResult, RateLimitState
8 |
9 | if TYPE_CHECKING:
10 | from ..store import MemoryStoreBackend, RedisStoreBackend
11 |
12 |
class RedisLimitAtomicActionCoreMixin:
    """Core mixin for RedisLimitAtomicAction.

    Carries the action/store type identifiers and the (deliberately unused)
    Lua script; the concrete classes issue the equivalent single commands
    directly — see the benchmark notes in ``__init__``.
    """

    TYPE: AtomicActionTypeT = ATOMIC_ACTION_TYPE_LIMIT
    STORE_TYPE: str = StoreType.REDIS.value

    SCRIPTS: str = """
    local period = tonumber(ARGV[1])
    local limit = tonumber(ARGV[2])
    local cost = tonumber(ARGV[3])
    local current = redis.call("INCRBY", KEYS[1], cost)

    if current == cost then
        redis.call("EXPIRE", KEYS[1], period)
    end

    return {current > limit and 1 or 0, current}
    """

    def __init__(self, backend: "RedisStoreBackend"):
        """
        :param backend: Redis store backend used to issue the commands.
        """
        # In single command scenario, lua has no performance advantage, and even causes
        # a decrease in performance due to the increase in transmission content.
        # Benchmarks(Python 3.8, Darwin 23.6.0, Arm)
        # >> Redis baseline
        #    command -> set key value
        #    serial     -> 🕒Latency: 0.0609 ms/op, 🚀Throughput: 16271 req/s
        #    concurrent -> 🕒Latency: 0.4515 ms/op, 💤Throughput: 12100 req/s
        # >> Lua
        #    serial     -> 🕒Latency: 0.0805 ms/op, 🚀Throughput: 12319 req/s
        #    concurrent -> 🕒Latency: 0.6959 ms/op, 💤Throughput: 10301 req/s
        # >> 👍 Single Command
        #    serial     -> 🕒Latency: 0.0659 ms/op, 🚀Throughput: 15040 req/s
        #    concurrent -> 🕒Latency: 0.9084 ms/op, 💤Throughput: 11539 req/s
        # self._script: Script = backend.get_client().register_script(self.SCRIPTS)
        super().__init__(backend)
        self._backend: RedisStoreBackend = backend
49 |
50 |
class RedisLimitAtomicAction(RedisLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Redis-based implementation of AtomicAction for FixedWindowRateLimiter."""

    def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Increment the window counter and report whether the limit is exceeded.

        :param keys: ``keys[0]`` is the per-window counter key.
        :param args: ``(period, limit, cost)`` for the current window.
        :return: ``(limited, current)`` — ``limited`` is 1 when the new count
                 exceeds the limit and the cost is non-zero, else 0.
        """
        period, limit, cost = args
        client = self._backend.get_client()
        current: int = client.incrby(keys[0], cost)
        # The counter equals the cost only on the first write of a window,
        # so the TTL is attached exactly once per window.
        if current == cost:
            client.expire(keys[0], period)
        limited: int = int(current > limit and cost != 0)
        return limited, current
62 |
63 |
class MemoryLimitAtomicActionCoreMixin:
    """Core mixin for MemoryLimitAtomicAction."""

    # Identifier of this AtomicAction; must be unique under STORE_TYPE.
    TYPE: AtomicActionTypeT = ATOMIC_ACTION_TYPE_LIMIT
    # Store type this AtomicAction is compatible with.
    STORE_TYPE: str = StoreType.MEMORY.value

    def __init__(self, backend: "MemoryStoreBackend"):
        """
        :param backend: In-memory store backend holding the counters.
        """
        super().__init__(backend)
        self._backend: "MemoryStoreBackend" = backend

    @classmethod
    def _do(
        cls,
        backend: "MemoryStoreBackend",
        keys: Sequence[KeyT],
        args: Optional[Sequence[StoreValueT]],
    ) -> Tuple[int, int]:
        """Add ``cost`` to the window counter and report the limit state.

        Not synchronized on its own — callers are expected to hold the
        backend lock.

        :param backend: Backend to read/write the counter on.
        :param keys: ``keys[0]`` is the per-window counter key.
        :param args: ``(period, limit, cost)`` for the current window.
        :return: ``(limited, current)`` mirroring the Redis implementation.
        """
        period, limit, cost = args
        counter_key: str = keys[0]
        existing: Optional[int] = backend.get(counter_key)
        if existing is None:
            # First hit in this window: create the counter with the window
            # period as its TTL.
            updated = cost
            backend.set(counter_key, updated, period)
        else:
            # Subsequent hit: bump the counter in place, leaving the TTL
            # untouched.
            updated = existing + cost
            backend.get_client()[counter_key] = updated

        limited: int = int(updated > limit and cost != 0)
        return limited, updated
92 |
93 |
class MemoryLimitAtomicAction(MemoryLimitAtomicActionCoreMixin, BaseAtomicAction):
    """Memory-based implementation of AtomicAction for FixedWindowRateLimiter."""

    def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Tuple[int, int]:
        """Atomically apply the fixed-window increment under the backend lock.

        :param keys: ``keys[0]`` is the per-window counter key.
        :param args: ``(period, limit, cost)`` for the current window.
        :return: ``(limited, current)`` as produced by :meth:`_do`.
        """
        # Serialize access: the in-memory store is shared across threads.
        with self._backend.lock:
            return self._do(self._backend, keys, args)
102 |
103 |
class FixedWindowRateLimiterCoreMixin(BaseRateLimiterMixin):
    """Core mixin for FixedWindowRateLimiter."""

    # Filled in by the concrete sync/async subclass.
    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[AtomicActionP]] = []

    class Meta:
        type: RateLimiterTypeT = RateLimiterType.FIXED_WINDOW.value

    @classmethod
    def _default_atomic_action_classes(cls) -> List[Type[AtomicActionP]]:
        return cls._DEFAULT_ATOMIC_ACTION_CLASSES

    @classmethod
    def _supported_atomic_action_types(cls) -> List[AtomicActionTypeT]:
        return [ATOMIC_ACTION_TYPE_LIMIT]

    def _prepare(self, key: str) -> Tuple[str, int, int, int]:
        """Derive the window-scoped store key and the quota parameters.

        :param key: The caller-supplied rate-limit key.
        :return: ``(period_key, period, limit, now)``.
        """
        timestamp: int = now_sec()
        period: int = self.quota.get_period_sec()
        # Integer division maps every timestamp within a window to the same
        # window index, giving each window its own counter key.
        window_idx: int = timestamp // period
        period_key: str = self._prepare_key(f"{key}:period:{window_idx}")
        return period_key, period, self.quota.get_limit(), timestamp
125 |
126 |
class FixedWindowRateLimiter(FixedWindowRateLimiterCoreMixin, BaseRateLimiter):
    """Concrete implementation of BaseRateLimiter using fixed window as algorithm."""

    _DEFAULT_ATOMIC_ACTION_CLASSES: List[Type[BaseAtomicAction]] = [
        RedisLimitAtomicAction,
        MemoryLimitAtomicAction,
    ]

    def _limit(self, key: str, cost: int = 1) -> RateLimitResult:
        """Consume ``cost`` units from the current window and build the result."""
        period_key, period, limit, now = self._prepare(key)
        limited, current = self._atomic_actions[ATOMIC_ACTION_TYPE_LIMIT].do(
            [period_key], [period, limit, cost]
        )

        # |-- now % period --|-- reset_after --|----- next period -----|
        # |--------------- period -------------|
        reset_after: float = period - (now % period)
        # A limited request can only succeed once the window rolls over.
        retry_after: float = reset_after if limited else 0
        remaining: int = max(0, limit - current)
        return RateLimitResult(
            limited=bool(limited),
            state_values=(limit, remaining, reset_after, retry_after),
        )

    def _peek(self, key: str) -> RateLimitState:
        """Inspect the current window without consuming any quota."""
        period_key, period, limit, now = self._prepare(key)
        current: int = int(self._store.get(period_key) or 0)
        return RateLimitState(
            limit=limit,
            remaining=max(0, limit - current),
            reset_after=period - (now % period),
        )
162 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/lua/fixed_window.lua:
--------------------------------------------------------------------------------
-- Fixed Window algorithm implementation for rate limiting.
-- ARGV[1]: period - The window period in seconds.
-- ARGV[2]: limit - Maximum allowed requests per window.
-- ARGV[3]: cost - Weight of the current request.
-- KEYS[1]: key - Redis key storing the current window count.

local window_period = tonumber(ARGV[1])
local max_requests = tonumber(ARGV[2])
local request_cost = tonumber(ARGV[3])

-- INCRBY both creates the counter (first request) and bumps it (later ones).
local window_count = redis.call("INCRBY", KEYS[1], request_cost)

-- The counter equals the cost only for the first request of a window,
-- so the TTL is attached exactly once per window.
if window_count == request_cost then
    redis.call("EXPIRE", KEYS[1], window_period)
end

-- Return [limited, current]
-- limited: 1 if over limit, 0 otherwise.
-- current: current count in current window.
if window_count > max_requests then
    return {1, window_count}
else
    return {0, window_count}
end
21 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/lua/gcra.lua:
--------------------------------------------------------------------------------
-- GCRA (Generic Cell Rate Algorithm) implementation for rate limiting.
-- Inspired by [Rate Limiting, Cells, and GCRA](https://brandur.org/rate-limiting).
-- ARGV[1]: emission_interval - Time interval to add one Token.
-- ARGV[2]: capacity - Maximum number of tokens.
-- ARGV[3]: cost - Number of tokens required for the current request.
-- KEYS[1]: Redis key to store the last token generation time.

local emission_interval = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local cost = tonumber(ARGV[3])

-- Redis TIME returns {seconds, microseconds}. Offsetting by a recent epoch
-- (2025-01-01) keeps the value small — presumably to preserve float
-- precision for the microsecond fraction.
local jan_1_2025 = 1735660800
local now = redis.call("TIME")
now = (now[1] - jan_1_2025) + (now[2] / 1000000)

-- Load the stored theoretical arrival time (TAT); default to now for a
-- first-seen key.
local last_tat = redis.call("GET", KEYS[1])
if not last_tat then
    last_tat = now
else
    last_tat = tonumber(last_tat)
end

-- Calculate the fill time required for the current cost.
local fill_time_for_cost = cost * emission_interval
-- Calculate the fill time required for the full capacity.
local fill_time_for_capacity = capacity * emission_interval
-- Calculate the theoretical arrival time (TAT) for the current request.
local tat = math.max(now, last_tat) + fill_time_for_cost
-- Calculate the time when the request would be allowed.
local allow_at = tat - fill_time_for_capacity
-- Calculate the time elapsed since the request would be allowed.
local time_elapsed = now - allow_at


local limited = 0
local retry_after = 0
local reset_after = tat - now
local remaining = math.floor(time_elapsed / emission_interval)
if remaining < 0 then
    -- Request arrived too early: reject and report when it may retry.
    limited = 1
    retry_after = time_elapsed * -1
    reset_after = math.max(0, last_tat - now)
    remaining = math.min(capacity, cost + remaining)
else
    -- Persist the new TAT only on success; the key may expire once the
    -- token debt (tat - now) would be fully repaid.
    if reset_after > 0 then
        redis.call("SET", KEYS[1], tat, "EX", math.ceil(reset_after))
    end
end

-- Return [limited, remaining, reset_after, retry_after]
-- limited: 1 if the request is limited, 0 otherwise.
-- remaining: Available tokens after the current request.
-- reset_after: Time in seconds until rate limiter resets (string to preserve precision).
-- retry_after: Time in seconds until the request is allowed (string to preserve precision).
return {limited, remaining, tostring(reset_after), tostring(retry_after)}
56 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/lua/gcra_peek.lua:
--------------------------------------------------------------------------------
-- GCRA (Generic Cell Rate Algorithm) implementation for rate limiting.
-- Read-only variant: inspects the limiter state without writing to Redis.
-- Inspired by [Rate Limiting, Cells, and GCRA](https://brandur.org/rate-limiting).
-- ARGV[1]: emission_interval - Time interval to add one Token.
-- ARGV[2]: capacity - Maximum number of tokens.
-- KEYS[1]: Redis key to store the last token generation time.

local emission_interval = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])

-- Redis TIME returns {seconds, microseconds}. Offsetting by a recent epoch
-- (2025-01-01) keeps the value small — presumably to preserve float
-- precision for the microsecond fraction.
local jan_1_2025 = 1735660800
local now = redis.call("TIME")
now = (now[1] - jan_1_2025) + (now[2] / 1000000)

-- Load the stored theoretical arrival time (TAT); default to now when the
-- key does not exist.
local tat = redis.call("GET", KEYS[1])
if not tat then
    tat = now
else
    tat= tonumber(tat)
end

-- Calculate the fill time required for the full capacity.
local fill_time_for_capacity = capacity * emission_interval
-- Calculate the time when the request would be allowed.
local allow_at = math.max(tat, now) - fill_time_for_capacity
-- Calculate the time elapsed since the request would be allowed.
local time_elapsed = now - allow_at

local limited = 0
local retry_after = 0
local reset_after = math.max(0, tat - now)
local remaining = math.floor(time_elapsed / emission_interval)
-- Fewer than one whole token available: report limited (peek appears to
-- assume a unit-cost request, unlike the limit script's `remaining < 0`).
if remaining < 1 then
    limited = 1
    remaining = 0
    retry_after = math.abs(time_elapsed)
end

-- Return [limited, remaining, reset_after, retry_after]
-- limited: 1 if the request is limited, 0 otherwise.
-- remaining: Available tokens after the current request.
-- reset_after: Time in seconds until rate limiter resets (string to preserve precision).
-- retry_after: Time in seconds until the request is allowed (string to preserve precision).
return {limited, remaining, tostring(reset_after), tostring(retry_after)}
44 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/lua/leaking_bucket.lua:
--------------------------------------------------------------------------------
-- Leaking bucket based on [As a meter](https://en.wikipedia.org/wiki/Leaky_bucket).
-- ARGV[1]: rate - Leak rate (requests processed per second).
-- ARGV[2]: capacity - Maximum capacity of the bucket.
-- ARGV[3]: cost - Weight of current request.
-- ARGV[4]: now - Current timestamp in seconds.
-- KEYS[1]: Redis hash key storing bucket state.

local rate = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local cost = tonumber(ARGV[3])
local now = tonumber(ARGV[4])

-- Start with empty bucket.
local last_tokens = 0
-- Initialize last leak time.
local last_refreshed = now
-- Get stored bucket state from Redis.
local bucket = redis.call("HMGET", KEYS[1], "tokens", "last_refreshed")

-- Override defaults if bucket exists.
if bucket[1] ~= false then
    last_tokens = tonumber(bucket[1])
    last_refreshed = tonumber(bucket[2])
end

-- Calculate time elapsed since last leak.
local time_elapsed = math.max(0, now - last_refreshed)
-- Calculate new water level (leak over time).
local tokens = math.max(0, last_tokens - (math.floor(time_elapsed * rate)))

-- Check if request exceeds available capacity.
-- Return an explicit 1/0 flag: a Lua boolean `false` converts to a Redis
-- nil reply (not 0), inconsistent with fixed_window.lua's numeric flag.
if tokens + cost > capacity then
    return {1, capacity - tokens}
end

-- Store new water level and update timestamp.
redis.call("HSET", KEYS[1], "tokens", tokens + cost, "last_refreshed", now)
-- Time to drain a full bucket.
local fill_time = capacity / rate
-- Set expiration AFTER the write: EXPIRE on a missing key is a no-op, so
-- calling it before HSET left a freshly created bucket with no TTL.
redis.call("EXPIRE", KEYS[1], math.floor(2 * fill_time))

-- Return [limited, remaining]
-- limited: 1 if over limit, 0 otherwise.
-- remaining: available capacity after processing request.
return {0, capacity - (tokens + cost)}
48 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/lua/sliding_window.lua:
--------------------------------------------------------------------------------
-- Sliding Window algorithm implementation for rate limiting.
-- ARGV[1]: period - The window period in seconds.
-- ARGV[2]: limit - Maximum allowed requests per window.
-- ARGV[3]: cost - Weight of the current request.
-- ARGV[4]: now_ms - Current time in milliseconds.
-- KEYS[1]: key - Redis key storing the current window count.
-- KEYS[2]: previous - Redis key storing the previous window count.

local period = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local cost = tonumber(ARGV[3])
local now_ms = tonumber(ARGV[4])

local exists = true
local current = redis.call("GET", KEYS[1])
if current == false then
    -- Initialize the current window count if it doesn't exist.
    current = 0
    exists = false
end

-- Get previous window count.
local previous = redis.call("GET", KEYS[2])
if previous == false then
    -- Default to 0 if previous window count doesn't exist.
    previous = 0
end

-- Calculate the current window count proportion.
-- For example, if the period is 10 seconds, and the current time is 1234567890,
-- the current window count proportion is (1234567890 % 10000) / 10000 = 0.23456789.
local period_ms = period * 1000
local current_proportion = (now_ms % period_ms) / period_ms
local previous_proportion = 1 - current_proportion
-- Weight the previous window count by how much of it still overlaps the
-- sliding window.
previous = math.floor(previous_proportion * previous)

local retry_after = 0
local used = previous + current + cost
-- Use an explicit 1/0 flag: a Lua boolean `false` converts to a Redis nil
-- reply (not 0), inconsistent with fixed_window.lua's numeric flag.
local limited = 0
if used > limit and cost ~= 0 then
    limited = 1
end
if limited == 1 then
    if cost <= previous then
        -- Enough of the previous window must slide out to free `cost` units.
        retry_after = previous_proportion * period * cost / previous
    else
        -- |-- previous --|- current -|------- new period -------|
        retry_after = previous_proportion * period
    end
else
    -- Update the current window count.
    if exists then
        -- Increment the current count by the cost.
        redis.call("INCRBY", KEYS[1], cost)
    else
        -- Set expiration only for the first request in a new window.
        redis.call("SET", KEYS[1], cost, "EX", 3 * period)
    end
end

-- Return [limited, used, retry_after]
-- limited: 1 if over limit, 0 otherwise.
-- used: weighted count consumed by the sliding window (previous + current + cost).
-- retry_after: time in seconds to wait before retrying (string to preserve precision).
return {limited, used, tostring(retry_after)}
64 |
--------------------------------------------------------------------------------
/throttled/rate_limiter/lua/token_bucket.lua:
--------------------------------------------------------------------------------
-- Token Bucket algorithm implementation for rate limiting.
-- ARGV[1]: rate - Tokens generated per second
-- ARGV[2]: capacity - Maximum number of tokens the bucket can hold.
-- ARGV[3]: cost - Number of tokens required for the current request.
-- ARGV[4]: now - Current time in seconds.
-- KEYS[1]: Redis hash key storing bucket state.

local rate = tonumber(ARGV[1])
local capacity = tonumber(ARGV[2])
local cost = tonumber(ARGV[3])
local now = tonumber(ARGV[4])

-- Initialize default bucket state.
local last_tokens = capacity -- Start with full bucket.
local last_refreshed = now   -- Initialize last refresh time.
-- Get stored bucket state from Redis.
local bucket = redis.call("HMGET", KEYS[1], "tokens", "last_refreshed")

-- Override default bucket state with stored state.
if bucket[1] ~= false then
    last_tokens = tonumber(bucket[1])
    last_refreshed = tonumber(bucket[2])
end

-- Calculate time elapsed since last refresh.
local time_elapsed = math.max(0, now - last_refreshed)
-- Refill tokens earned since the last refresh, capped at capacity.
local tokens = math.min(capacity, last_tokens + (math.floor(time_elapsed * rate)))

-- Check if request exceeds available tokens.
-- Return an explicit 1/0 flag: a Lua boolean `false` converts to a Redis
-- nil reply (not 0), inconsistent with fixed_window.lua's numeric flag.
if cost > tokens then
    return {1, tokens}
end

-- Deduct tokens for current request.
tokens = tokens - cost
-- Calculate time to refill bucket.
local fill_time = capacity / rate
-- Update bucket state in Redis; bound its lifetime to avoid stale keys.
redis.call("HSET", KEYS[1], "tokens", tokens, "last_refreshed", now)
redis.call("EXPIRE", KEYS[1], math.floor(2 * fill_time))

-- Return [limited, tokens]
-- limited: 1 if over limit, 0 otherwise.
-- tokens: number of tokens remaining in bucket.
return {0, tokens}
48 |
--------------------------------------------------------------------------------
/throttled/store/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseAtomicAction, BaseStore, BaseStoreBackend
2 | from .memory import MemoryStore, MemoryStoreBackend
3 | from .redis import RedisStore, RedisStoreBackend
4 | from .redis_pool import (
5 | BaseConnectionFactory,
6 | ConnectionFactory,
7 | SentinelConnectionFactory,
8 | get_connection_factory,
9 | )
10 |
# Explicit public API of the store subpackage.
__all__ = [
    "BaseStoreBackend",
    "BaseAtomicAction",
    "BaseStore",
    "MemoryStoreBackend",
    "MemoryStore",
    "RedisStoreBackend",
    "RedisStore",
    "BaseConnectionFactory",
    "ConnectionFactory",
    "SentinelConnectionFactory",
    "get_connection_factory",
]
24 |
--------------------------------------------------------------------------------
/throttled/store/base.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from typing import Any, Dict, Optional, Sequence, Type
3 |
4 | from ..exceptions import DataError
5 | from ..types import AtomicActionP, AtomicActionTypeT, KeyT, StoreDictValueT, StoreValueT
6 |
7 |
class BaseStoreBackend(abc.ABC):
    """Abstract class for all store backends.

    A backend holds the connection configuration (server address plus an
    options mapping) and hands out a ready-to-use client via ``get_client``.
    """

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ) -> None:
        # Server address; its interpretation is backend-specific.
        self.server: Optional[str] = server
        # Backend-specific configuration; defaults to an empty mapping.
        self.options: Dict[str, Any] = options or {}

    @abc.abstractmethod
    def get_client(self) -> Any:
        """Return the underlying client object for this backend."""
        raise NotImplementedError
20 |
21 |
class BaseAtomicActionMixin:
    """Mixin class for AtomicAction.

    This class provides shared logic for both sync and async implementations of
    AtomicAction. It includes type checking and helper methods that ensure
    compatibility between store types.
    """

    # TYPE is the identifier of AtomicAction, must be unique under STORE_TYPE.
    TYPE: AtomicActionTypeT = ""
    # STORE_TYPE is the expected type of store with which AtomicAction is compatible.
    STORE_TYPE: str = ""

    def __init__(self, backend: BaseStoreBackend):
        # Intentionally a no-op: subclasses decide what to keep from the
        # backend; the parameter fixes the construction interface.
        pass
37 |
38 |
class BaseAtomicAction(BaseAtomicActionMixin, abc.ABC):
    """Abstract class for all atomic actions performed by a store backend."""

    @abc.abstractmethod
    def do(self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]) -> Any:
        """Execute the AtomicAction on the specified keys with optional arguments.

        :param keys: A sequence of keys.
        :param args: Optional sequence of arguments.
        :return: Any: The result of the AtomicAction.
        """
        raise NotImplementedError
50 |
51 |
class BaseStoreMixin:
    """Mixin class for async / sync BaseStore."""

    # TYPE is a unique identifier for the type of store.
    TYPE: str = ""

    @classmethod
    def _validate_timeout(cls, timeout: int) -> None:
        """Validate the timeout.

        :param timeout: The timeout in seconds; must be a positive integer.
        :raise: DataError when the timeout is not a positive integer.
        """
        # Guard clause: reject anything that is not a positive integer.
        if not (isinstance(timeout, int) and timeout > 0):
            raise DataError(
                f"Invalid timeout: {timeout}, Must be an integer greater than 0."
            )

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ):
        # Interface-only initializer: concrete stores build their backend here.
        pass
77 |
78 |
class BaseStore(BaseStoreMixin, abc.ABC):
    """Abstract class for all stores."""

    @abc.abstractmethod
    def exists(self, key: KeyT) -> bool:
        """Check if the specified key exists.

        :param key: The key to check.
        :return: ``True`` if the specified key exists, ``False`` otherwise.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def ttl(self, key: KeyT) -> int:
        """Returns the number of seconds until the specified key will expire.

        :param key: The key to check.
        :return: The remaining time to live in seconds.
        :raise: :class:`throttled.exceptions.DataError` if the key does not exist
            or is not set.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def expire(self, key: KeyT, timeout: int) -> None:
        """Set the expiration time for the specified key.

        :param key: The key to set the expiration for.
        :param timeout: The timeout in seconds.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        """Set a value for the specified key with specified timeout.

        :param key: The key to set.
        :param value: The value to set.
        :param timeout: The timeout in seconds.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get(self, key: KeyT) -> Optional[StoreValueT]:
        """Get a value for the specified key.

        :param key: The key for which to get a value.
        :return: The value for the specified key, or None if it does not exist.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        """Set a value for the specified key in the specified hash.

        :param name: The name of the hash.
        :param key: The key in the hash.
        :param value: The value to set.
        :param mapping: A dictionary of key-value pairs to set.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def hgetall(self, name: KeyT) -> StoreDictValueT:
        """Get all key-value pairs stored in the specified hash.

        :param name: The name of the hash.
        :return: A dictionary of the hash's keys and values.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def make_atomic(self, action_cls: Type[AtomicActionP]) -> AtomicActionP:
        """Create an instance of an AtomicAction for this store.

        :param action_cls: The class of the AtomicAction.
        :return: The AtomicAction instance.
        """
        raise NotImplementedError
157 |
--------------------------------------------------------------------------------
/throttled/store/redis.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING, Any, Dict, Optional, Type, Union
2 |
3 | from ..constants import StoreType
4 | from ..exceptions import DataError
5 | from ..types import AtomicActionP, KeyT, StoreDictValueT, StoreValueT
6 | from ..utils import format_kv, format_value
7 | from .base import BaseStore, BaseStoreBackend
8 | from .redis_pool import BaseConnectionFactory, get_connection_factory
9 |
if TYPE_CHECKING:
    import redis
    import redis.asyncio as aioredis

    # Annotation-only alias: either a sync or an async redis client.
    Redis = Union[redis.Redis, aioredis.Redis]
15 |
16 |
class RedisStoreBackend(BaseStoreBackend):
    """Backend for Redis store."""

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ):
        """
        :param server: Redis server address.
        :param options: Extra options; ``CONNECTION_FACTORY_CLASS`` selects a
                        custom connection factory.
        """
        super().__init__(server, options)

        # Client is created lazily on first use (see get_client).
        self._client: Optional["Redis"] = None

        # Allow callers to swap in a custom connection factory via the
        # CONNECTION_FACTORY_CLASS option; otherwise the default is used.
        factory_path: Optional[str] = self.options.get("CONNECTION_FACTORY_CLASS")
        self._connection_factory: BaseConnectionFactory = get_connection_factory(
            factory_path, self.options
        )

    def get_client(self) -> "Redis":
        """Return the cached Redis client, connecting on first call."""
        if self._client is None:
            self._client = self._connection_factory.connect(self.server)
        return self._client
39 |
40 |
class RedisStore(BaseStore):
    """Concrete implementation of BaseStore using Redis as backend.

    :class:`throttled.store.RedisStore` is implemented based on
    `redis-py `_, you can use it for
    rate limiting in a distributed environment.
    """

    TYPE: str = StoreType.REDIS.value

    # Backend class to instantiate; subclasses may override.
    _BACKEND_CLASS: Type[RedisStoreBackend] = RedisStoreBackend

    def __init__(
        self, server: Optional[str] = None, options: Optional[Dict[str, Any]] = None
    ):
        """
        Initialize RedisStore, see
        :ref:`RedisStore Arguments `.
        """
        super().__init__(server, options)
        self._backend: RedisStoreBackend = self._BACKEND_CLASS(server, options)

    def exists(self, key: KeyT) -> bool:
        """Return ``True`` if ``key`` exists in Redis."""
        return bool(self._backend.get_client().exists(key))

    def ttl(self, key: KeyT) -> int:
        """Return the remaining TTL of ``key`` in seconds."""
        return int(self._backend.get_client().ttl(key))

    def expire(self, key: KeyT, timeout: int) -> None:
        """Set ``key`` to expire after ``timeout`` seconds.

        :raise: DataError if ``timeout`` is not a positive integer.
        """
        self._validate_timeout(timeout)
        self._backend.get_client().expire(key, timeout)

    def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        """Set ``key`` to ``value`` with an expiration of ``timeout`` seconds.

        :raise: DataError if ``timeout`` is not a positive integer.
        """
        self._validate_timeout(timeout)
        self._backend.get_client().set(key, value, ex=timeout)

    def get(self, key: KeyT) -> Optional[StoreValueT]:
        """Get the value of ``key``, or ``None`` if it does not exist."""
        value: Optional[StoreValueT] = self._backend.get_client().get(key)
        if value is None:
            return None

        # Normalize the raw client reply via format_value.
        return format_value(value)

    def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        """Set field(s) on hash ``name``: a single key/value and/or a mapping.

        :raise: DataError if neither a key nor a mapping is provided.
        """
        # Reject calls that provide neither a key/value pair nor a mapping.
        if key is None and not mapping:
            raise DataError("hset must with key value pairs")
        self._backend.get_client().hset(name, key, value, mapping)

    def hgetall(self, name: KeyT) -> StoreDictValueT:
        """Get all field-value pairs of hash ``name``, normalized via format_kv."""
        return format_kv(self._backend.get_client().hgetall(name))

    def make_atomic(self, action_cls: Type[AtomicActionP]) -> AtomicActionP:
        """Instantiate ``action_cls`` bound to this store's backend."""
        return action_cls(backend=self._backend)
100 |
--------------------------------------------------------------------------------
/throttled/types.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Optional, Protocol, Sequence, Type, Union
2 |
# Internal building blocks for the public aliases below.
_StringLikeT = str
_NumberLikeT = Union[int, float]

# Store keys are plain strings.
KeyT = _StringLikeT
# Scalar values a store can hold.
StoreValueT = _NumberLikeT
# Hash-shaped store values.
StoreDictValueT = Dict[KeyT, _NumberLikeT]
# Bucket state: either a scalar or a hash of scalars.
StoreBucketValueT = Union[_NumberLikeT, StoreDictValueT]

# Identifier of an AtomicAction implementation.
AtomicActionTypeT = str

# Identifier of a rate limiter algorithm.
RateLimiterTypeT = str

# Timestamps and durations, in seconds.
TimeLikeValueT = Union[int, float]
16 |
17 |
class _SyncLockP(Protocol):
    """Protocol for sync lock."""

    def acquire(self) -> bool:
        ...

    def release(self) -> None:
        ...

    def __exit__(self, exc_type, exc, tb) -> None:
        ...

    # Entering the context acquires the lock, mirroring threading.Lock.
    __enter__ = acquire
31 |
32 |
class _AsyncLockP(Protocol):
    """Protocol for async lock."""

    async def acquire(self) -> bool:
        ...

    # Release stays synchronous, mirroring asyncio.Lock.
    def release(self) -> None:
        ...

    async def __aenter__(self) -> None:
        ...

    async def __aexit__(self, exc_type, exc, tb) -> None:
        ...
47 |
48 |
# A lock of either flavor: sync or async.
LockP = Union[_SyncLockP, _AsyncLockP]
50 |
51 |
class StoreBackendP(Protocol):
    """Structural type for store backends: anything exposing ``get_client``."""

    def get_client(self):
        ...
55 |
56 |
class _SyncAtomicActionP(Protocol):
    """_SyncAtomicActionP is a protocol for all sync atomic actions."""

    # Identifier of the action; unique within a store type.
    TYPE: AtomicActionTypeT

    # Store type the action is compatible with.
    STORE_TYPE: str

    def __init__(self, backend: StoreBackendP) -> None:
        ...

    def do(self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]) -> Any:
        ...
69 |
70 |
class _AsyncAtomicActionP(Protocol):
    """_AsyncAtomicActionP is a protocol for all async atomic actions."""

    # Identifier of the action; unique within a store type.
    TYPE: AtomicActionTypeT

    # Store type the action is compatible with.
    STORE_TYPE: str

    def __init__(self, backend: StoreBackendP) -> None:
        ...

    async def do(
        self, keys: Sequence[KeyT], args: Optional[Sequence[StoreValueT]]
    ) -> Any:
        ...
85 |
86 |
# An atomic action of either flavor: sync or async.
AtomicActionP = Union[_SyncAtomicActionP, _AsyncAtomicActionP]
88 |
89 |
class _SyncStoreP(Protocol):
    """_SyncStoreP is a protocol for all sync store backends."""

    # Unique identifier for the type of store.
    TYPE: str

    def exists(self, key: KeyT) -> bool:
        ...

    def ttl(self, key: KeyT) -> int:
        ...

    def expire(self, key: KeyT, timeout: int) -> None:
        ...

    def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        ...

    def get(self, key: KeyT) -> Optional[StoreValueT]:
        ...

    def hgetall(self, name: KeyT) -> StoreDictValueT:
        ...

    def make_atomic(self, action: Type[AtomicActionP]) -> AtomicActionP:
        ...

    def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        ...
124 |
125 |
class _AsyncStoreP(Protocol):
    """_AsyncStoreP is a protocol for all async store backends."""

    # Unique identifier for the type of store.
    TYPE: str

    async def exists(self, key: KeyT) -> bool:
        ...

    async def ttl(self, key: KeyT) -> int:
        ...

    async def expire(self, key: KeyT, timeout: int) -> None:
        ...

    async def set(self, key: KeyT, value: StoreValueT, timeout: int) -> None:
        ...

    async def get(self, key: KeyT) -> Optional[StoreValueT]:
        ...

    async def hgetall(self, name: KeyT) -> StoreDictValueT:
        ...

    # Construction stays synchronous even on the async store.
    def make_atomic(self, action: Type[AtomicActionP]) -> AtomicActionP:
        ...

    async def hset(
        self,
        name: KeyT,
        key: Optional[KeyT] = None,
        value: Optional[StoreValueT] = None,
        mapping: Optional[StoreDictValueT] = None,
    ) -> None:
        ...
160 |
161 |
# A store of either flavor: sync or async.
StoreP = Union[_SyncStoreP, _AsyncStoreP]
163 |
--------------------------------------------------------------------------------