├── qstash ├── py.typed ├── asyncio │ ├── __init__.py │ ├── signing_key.py │ ├── log.py │ ├── client.py │ ├── queue.py │ ├── url_group.py │ ├── dlq.py │ ├── http.py │ ├── schedule.py │ └── message.py ├── __init__.py ├── chat.py ├── errors.py ├── signing_key.py ├── client.py ├── receiver.py ├── queue.py ├── url_group.py ├── http.py ├── dlq.py ├── log.py └── schedule.py ├── .env.example ├── tests ├── test_signing_key.py ├── asyncio │ ├── test_signing_key.py │ ├── test_url_group.py │ ├── test_queue.py │ ├── test_dlq.py │ ├── test_schedules.py │ └── test_message.py ├── test_url_group.py ├── test_receiver.py ├── test_queue.py ├── __init__.py ├── test_dlq.py ├── conftest.py ├── test_schedules.py └── test_message.py ├── examples ├── basic_publish.py ├── basic_schedule.py ├── async_publish.py ├── callback.py └── llm.py ├── LICENSE ├── .github └── workflows │ ├── release.yml │ └── tests.yml ├── pyproject.toml ├── .gitignore └── README.md /qstash/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /qstash/asyncio/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | QSTASH_TOKEN="YOUR_TOKEN" 2 | QSTASH_CURRENT_SIGNING_KEY="" 3 | QSTASH_NEXT_SIGNING_KEY="" 4 | OPENAI_API_KEY = "" 5 | -------------------------------------------------------------------------------- /qstash/__init__.py: -------------------------------------------------------------------------------- 1 | from qstash.asyncio.client import AsyncQStash 2 | from qstash.client import QStash 3 | from qstash.receiver import Receiver 4 | 5 | __version__ = "3.2.0" 6 | __all__ = ["QStash", "AsyncQStash", "Receiver"] 7 | 
-------------------------------------------------------------------------------- /tests/test_signing_key.py: -------------------------------------------------------------------------------- 1 | from qstash import QStash 2 | from tests import QSTASH_CURRENT_SIGNING_KEY, QSTASH_NEXT_SIGNING_KEY 3 | 4 | 5 | def test_get(client: QStash) -> None: 6 | key = client.signing_key.get() 7 | assert key.current == QSTASH_CURRENT_SIGNING_KEY 8 | assert key.next == QSTASH_NEXT_SIGNING_KEY 9 | -------------------------------------------------------------------------------- /tests/asyncio/test_signing_key.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from qstash import AsyncQStash 4 | from tests import QSTASH_CURRENT_SIGNING_KEY, QSTASH_NEXT_SIGNING_KEY 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_get_async(async_client: AsyncQStash) -> None: 9 | key = await async_client.signing_key.get() 10 | assert key.current == QSTASH_CURRENT_SIGNING_KEY 11 | assert key.next == QSTASH_NEXT_SIGNING_KEY 12 | -------------------------------------------------------------------------------- /examples/basic_publish.py: -------------------------------------------------------------------------------- 1 | """ 2 | Publishes a JSON message with a 3s delay to a URL using QStash. 
3 | """ 4 | 5 | from qstash import QStash 6 | 7 | 8 | def main() -> None: 9 | client = QStash( 10 | token="", 11 | ) 12 | 13 | res = client.message.publish_json( 14 | url="https://example.com", 15 | body={"hello": "world"}, 16 | headers={ 17 | "test-header": "test-value", 18 | }, 19 | delay="3s", 20 | ) 21 | 22 | print(res.message_id) # type:ignore[union-attr] 23 | 24 | 25 | if __name__ == "__main__": 26 | main() 27 | -------------------------------------------------------------------------------- /examples/basic_schedule.py: -------------------------------------------------------------------------------- 1 | """ 2 | Create a schedule that publishes a message every minute. 3 | """ 4 | 5 | from qstash import QStash 6 | 7 | 8 | def main() -> None: 9 | client = QStash( 10 | token="", 11 | ) 12 | 13 | schedule_id = client.schedule.create_json( 14 | cron="* * * * *", 15 | destination="https://example.com", 16 | body={"hello": "world"}, 17 | ) 18 | 19 | # Print out the schedule ID 20 | print(schedule_id) 21 | 22 | # You can also get a schedule by ID 23 | schedule = client.schedule.get(schedule_id) 24 | print(schedule.cron) 25 | 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /examples/async_publish.py: -------------------------------------------------------------------------------- 1 | """ 2 | Uses asyncio to asynchronously publish a JSON message with a 3s delay to a URL using QStash. 
3 | """ 4 | 5 | import asyncio 6 | 7 | from qstash import AsyncQStash 8 | 9 | 10 | async def main() -> None: 11 | client = AsyncQStash( 12 | token="", 13 | ) 14 | 15 | res = await client.message.publish_json( 16 | url="https://example.com", 17 | body={"hello": "world"}, 18 | headers={ 19 | "test-header": "test-value", 20 | }, 21 | delay="3s", 22 | ) 23 | 24 | print(res.message_id) # type:ignore[union-attr] 25 | 26 | 27 | if __name__ == "__main__": 28 | asyncio.run(main()) 29 | -------------------------------------------------------------------------------- /examples/callback.py: -------------------------------------------------------------------------------- 1 | """ 2 | Publish a message to a URL and send the response to a callback URL. 3 | 4 | This is useful if you have a time consuming API call 5 | and you want to send the response to your API URL without having 6 | to wait for the response in a serverless function. 7 | """ 8 | 9 | from qstash import QStash 10 | 11 | 12 | def main() -> None: 13 | client = QStash( 14 | token="", 15 | ) 16 | 17 | client.message.publish_json( 18 | url="https://expensive.com", 19 | callback="https://example-cb.com", 20 | # We want to send a GET request to https://expensive.com and have the response 21 | # sent to https://example-cb.com 22 | method="GET", 23 | ) 24 | 25 | 26 | if __name__ == "__main__": 27 | main() 28 | -------------------------------------------------------------------------------- /qstash/chat.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import re 3 | 4 | 5 | @dataclasses.dataclass 6 | class LlmProvider: 7 | name: str 8 | """Name of the LLM provider.""" 9 | 10 | base_url: str 11 | """Base URL of the provider.""" 12 | 13 | token: str 14 | """ 15 | The token for the provider. 16 | 17 | The provided key will be passed to the 18 | endpoint as a bearer token. 
19 | """ 20 | 21 | 22 | def openai(token: str) -> LlmProvider: 23 | return LlmProvider( 24 | name="OpenAI", 25 | base_url="https://api.openai.com", 26 | token=token, 27 | ) 28 | 29 | 30 | def custom(base_url: str, token: str) -> LlmProvider: 31 | base_url = re.sub("/(v1/)?chat/completions$", "", base_url) 32 | return LlmProvider( 33 | name="custom", 34 | base_url=base_url, 35 | token=token, 36 | ) 37 | -------------------------------------------------------------------------------- /examples/llm.py: -------------------------------------------------------------------------------- 1 | """ 2 | Create a chat completion request and send the response to a callback URL. 3 | 4 | This is useful to send the response to your API without having 5 | to wait for the response in a serverless function. 6 | """ 7 | 8 | from qstash import QStash 9 | from qstash.chat import openai 10 | 11 | 12 | def main() -> None: 13 | client = QStash( 14 | token="", 15 | ) 16 | 17 | client.message.publish_json( 18 | api={"name": "llm", "provider": openai("")}, 19 | body={ 20 | "model": "gpt-4.1", 21 | "messages": [ 22 | { 23 | "role": "user", 24 | "content": "What is the capital of Turkey?", 25 | } 26 | ], 27 | }, 28 | callback="https://example-cb.com", 29 | # We want to send the response to https://example-cb.com 30 | ) 31 | 32 | 33 | if __name__ == "__main__": 34 | main() 35 | -------------------------------------------------------------------------------- /qstash/errors.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | 4 | class QStashError(Exception): ... 
5 | 6 | 7 | class SignatureError(QStashError): 8 | def __init__(self, *args: object) -> None: 9 | super().__init__(*args) 10 | 11 | 12 | class RateLimitExceededError(QStashError): 13 | def __init__( 14 | self, limit: Optional[str], remaining: Optional[str], reset: Optional[str] 15 | ): 16 | super().__init__( 17 | f"Exceeded burst rate limit: Limit: {limit}, remaining: {remaining}, reset: {reset}" 18 | ) 19 | self.limit = limit 20 | self.remaining = remaining 21 | self.reset = reset 22 | 23 | 24 | class DailyMessageLimitExceededError(QStashError): 25 | def __init__( 26 | self, limit: Optional[str], remaining: Optional[str], reset: Optional[str] 27 | ): 28 | super().__init__( 29 | f"Exceeded daily message limit: Limit: {limit}, remaining: {remaining}, reset: {reset}" 30 | ) 31 | self.limit = limit 32 | self.remaining = remaining 33 | self.reset = reset 34 | -------------------------------------------------------------------------------- /qstash/asyncio/signing_key.py: -------------------------------------------------------------------------------- 1 | from qstash.asyncio.http import AsyncHttpClient 2 | from qstash.signing_key import SigningKey, parse_signing_key_response 3 | 4 | 5 | class AsyncSigningKeyApi: 6 | def __init__(self, http: AsyncHttpClient) -> None: 7 | self._http = http 8 | 9 | async def get(self) -> SigningKey: 10 | """ 11 | Gets the current and next signing keys. 12 | """ 13 | response = await self._http.request( 14 | path="/v2/keys", 15 | method="GET", 16 | ) 17 | 18 | return parse_signing_key_response(response) 19 | 20 | async def rotate(self) -> SigningKey: 21 | """ 22 | Rotates the current signing key and gets the new signing key. 23 | 24 | The next signing key becomes the current signing 25 | key, and a new signing key is assigned to the 26 | next signing key. 
27 | """ 28 | response = await self._http.request( 29 | path="/v2/keys/rotate", 30 | method="POST", 31 | ) 32 | 33 | return parse_signing_key_response(response) 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Upstash, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 | release: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - name: Checkout repository 11 | uses: actions/checkout@v2 12 | 13 | - name: Set up Python 14 | uses: actions/setup-python@v2 15 | with: 16 | python-version: 3.8 17 | 18 | - name: Install Poetry 19 | run: curl -sSL https://install.python-poetry.org | python3 - --version 1.8.4 20 | 21 | - name: Build and publish 22 | run: | 23 | poetry config pypi-token.pypi "${{secrets.PYPI_TOKEN}}" 24 | poetry build 25 | poetry publish --no-interaction 26 | 27 | - name: Generate release tag 28 | run: echo "RELEASE_TAG=v$(poetry version | awk '{print $2}')" >> $GITHUB_ENV 29 | 30 | - name: Create GitHub Release 31 | uses: actions/create-release@v1 32 | env: 33 | GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} 34 | with: 35 | tag_name: ${{ env.RELEASE_TAG }} 36 | release_name: Release ${{ env.RELEASE_TAG }} 37 | draft: false 38 | prerelease: false -------------------------------------------------------------------------------- /qstash/signing_key.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | from typing import Any, Dict 3 | 4 | from qstash.http import HttpClient 5 | 6 | 7 | @dataclasses.dataclass 8 | class SigningKey: 9 | current: str 10 | """Current signing key.""" 11 | 12 | next: str 13 | """Next signing key.""" 14 | 15 | 16 | def parse_signing_key_response(response: Dict[str, Any]) -> SigningKey: 17 | return SigningKey( 18 | current=response["current"], 19 | next=response["next"], 20 | ) 21 | 22 | 23 | class SigningKeyApi: 24 | def __init__(self, http: HttpClient) -> None: 25 | self._http = http 26 | 27 | def get(self) -> SigningKey: 28 | """ 29 | Gets the current and next signing keys. 
30 | """ 31 | response = self._http.request( 32 | path="/v2/keys", 33 | method="GET", 34 | ) 35 | 36 | return parse_signing_key_response(response) 37 | 38 | def rotate(self) -> SigningKey: 39 | """ 40 | Rotates the current signing key and gets the new signing key. 41 | 42 | The next signing key becomes the current signing 43 | key, and a new signing key is assigned to the 44 | next signing key. 45 | """ 46 | response = self._http.request( 47 | path="/v2/keys/rotate", 48 | method="POST", 49 | ) 50 | 51 | return parse_signing_key_response(response) 52 | -------------------------------------------------------------------------------- /tests/test_url_group.py: -------------------------------------------------------------------------------- 1 | from qstash import QStash 2 | 3 | 4 | def test_url_group(client: QStash) -> None: 5 | name = "python_url_group" 6 | client.url_group.delete(name) 7 | 8 | client.url_group.upsert_endpoints( 9 | url_group=name, 10 | endpoints=[ 11 | {"url": "https://mock.httpstatus.io/200"}, 12 | {"url": "https://mock.httpstatus.io/201"}, 13 | ], 14 | ) 15 | 16 | url_group = client.url_group.get(name) 17 | assert url_group.name == name 18 | assert any( 19 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/200" 20 | ) 21 | assert any( 22 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/201" 23 | ) 24 | 25 | url_groups = client.url_group.list() 26 | assert any(True for ug in url_groups if ug.name == name) 27 | 28 | client.url_group.remove_endpoints( 29 | url_group=name, 30 | endpoints=[ 31 | { 32 | "url": "https://mock.httpstatus.io/201", 33 | } 34 | ], 35 | ) 36 | 37 | url_group = client.url_group.get(name) 38 | assert url_group.name == name 39 | assert any( 40 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/200" 41 | ) 42 | assert not any( 43 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/201" 44 | ) 45 | 
-------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: 7 | - main 8 | push: 9 | branches: 10 | - main 11 | 12 | jobs: 13 | test: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: Checkout repository 18 | uses: actions/checkout@v2 19 | 20 | - name: Set up Python 21 | uses: actions/setup-python@v2 22 | with: 23 | python-version: 3.8 24 | 25 | - name: Install Poetry 26 | run: curl -sSL https://install.python-poetry.org | python3 - --version 1.8.4 27 | 28 | - name: Set up Poetry environment 29 | run: | 30 | poetry cache clear PyPI --all 31 | poetry install --no-root 32 | 33 | - name: Run ruff 34 | run: | 35 | poetry run ruff check . 36 | 37 | - name: Run ruff format 38 | run: | 39 | poetry run ruff format --check . 40 | 41 | - name: Run mypy 42 | run: | 43 | poetry run mypy --show-error-codes --strict . 
44 | 45 | - name: Run tests 46 | run: | 47 | export QSTASH_TOKEN="${{ secrets.QSTASH_TOKEN }}" 48 | export QSTASH_CURRENT_SIGNING_KEY="${{ secrets.QSTASH_CURRENT_SIGNING_KEY }}" 49 | export QSTASH_NEXT_SIGNING_KEY="${{ secrets.QSTASH_NEXT_SIGNING_KEY }}" 50 | export OPENAI_API_KEY="${{ secrets.OPENAI_API_KEY }}" 51 | poetry run pytest 52 | -------------------------------------------------------------------------------- /qstash/asyncio/log.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from qstash.asyncio.http import AsyncHttpClient 4 | from qstash.log import ( 5 | LogFilter, 6 | ListLogsResponse, 7 | parse_logs_response, 8 | prepare_list_logs_request_params, 9 | ) 10 | 11 | 12 | class AsyncLogApi: 13 | def __init__(self, http: AsyncHttpClient) -> None: 14 | self._http = http 15 | 16 | async def list( 17 | self, 18 | *, 19 | cursor: Optional[str] = None, 20 | count: Optional[int] = None, 21 | filter: Optional[LogFilter] = None, 22 | ) -> ListLogsResponse: 23 | """ 24 | Lists all logs that happened, such as message creation or delivery. 25 | 26 | :param cursor: Optional cursor to start listing logs from. 27 | :param count: The maximum number of logs to return. 28 | Default and max is `1000`. 29 | :param filter: Filter to use. 
30 | """ 31 | params = prepare_list_logs_request_params( 32 | cursor=cursor, 33 | count=count, 34 | filter=filter, 35 | ) 36 | 37 | response = await self._http.request( 38 | path="/v2/events", 39 | method="GET", 40 | params=params, 41 | ) 42 | 43 | logs = parse_logs_response(response["events"]) 44 | 45 | return ListLogsResponse( 46 | cursor=response.get("cursor"), 47 | logs=logs, 48 | ) 49 | -------------------------------------------------------------------------------- /tests/asyncio/test_url_group.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from qstash import AsyncQStash 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_url_group_async(async_client: AsyncQStash) -> None: 8 | name = "python_url_group" 9 | await async_client.url_group.delete(name) 10 | 11 | await async_client.url_group.upsert_endpoints( 12 | url_group=name, 13 | endpoints=[ 14 | {"url": "https://mock.httpstatus.io/200"}, 15 | {"url": "https://mock.httpstatus.io/201"}, 16 | ], 17 | ) 18 | 19 | url_group = await async_client.url_group.get(name) 20 | assert url_group.name == name 21 | assert any( 22 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/200" 23 | ) 24 | assert any( 25 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/201" 26 | ) 27 | 28 | url_groups = await async_client.url_group.list() 29 | assert any(True for ug in url_groups if ug.name == name) 30 | 31 | await async_client.url_group.remove_endpoints( 32 | url_group=name, 33 | endpoints=[ 34 | { 35 | "url": "https://mock.httpstatus.io/201", 36 | } 37 | ], 38 | ) 39 | 40 | url_group = await async_client.url_group.get(name) 41 | assert url_group.name == name 42 | assert any( 43 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/200" 44 | ) 45 | assert not any( 46 | True for e in url_group.endpoints if e.url == "https://mock.httpstatus.io/201" 47 | ) 48 | 
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "qstash" 3 | version = "3.2.0" 4 | description = "Python SDK for Upstash QStash" 5 | license = "MIT" 6 | authors = ["Upstash "] 7 | maintainers = ["Upstash "] 8 | readme = "README.md" 9 | repository = "https://github.com/upstash/qstash-py" 10 | keywords = ["QStash", "Upstash QStash", "Serverless Queue"] 11 | classifiers = [ 12 | "Development Status :: 5 - Production/Stable", 13 | "Intended Audience :: Developers", 14 | "License :: OSI Approved :: MIT License", 15 | "Operating System :: OS Independent", 16 | "Programming Language :: Python", 17 | "Programming Language :: Python :: 3", 18 | "Programming Language :: Python :: 3 :: Only", 19 | "Programming Language :: Python :: 3.8", 20 | "Programming Language :: Python :: 3.9", 21 | "Programming Language :: Python :: 3.10", 22 | "Programming Language :: Python :: 3.11", 23 | "Programming Language :: Python :: 3.12", 24 | "Programming Language :: Python :: 3.13", 25 | "Programming Language :: Python :: Implementation :: CPython", 26 | "Topic :: Database", 27 | "Topic :: Database :: Front-Ends", 28 | "Topic :: Software Development :: Libraries", 29 | ] 30 | 31 | packages = [{ include = "qstash" }] 32 | 33 | [tool.poetry.dependencies] 34 | python = "^3.8" 35 | httpx = ">=0.23.0, <1" 36 | pyjwt = "^2.8.0" 37 | 38 | [tool.poetry.group.dev.dependencies] 39 | pytest = "^8.2.2" 40 | python-dotenv = "^1.0.1" 41 | pytest-asyncio = "^0.23.7" 42 | mypy = "^1.10.0" 43 | ruff = "^0.11.7" 44 | 45 | [build-system] 46 | requires = ["poetry-core"] 47 | build-backend = "poetry.core.masonry.api" 48 | -------------------------------------------------------------------------------- /qstash/client.py: -------------------------------------------------------------------------------- 1 | from os import environ 2 | from typing import 
Optional, Union, Literal 3 | 4 | from qstash.dlq import DlqApi 5 | from qstash.log import LogApi 6 | from qstash.http import RetryConfig, HttpClient 7 | from qstash.message import MessageApi 8 | from qstash.queue import QueueApi 9 | from qstash.schedule import ScheduleApi 10 | from qstash.signing_key import SigningKeyApi 11 | from qstash.url_group import UrlGroupApi 12 | 13 | 14 | class QStash: 15 | """Synchronous SDK for the Upstash QStash.""" 16 | 17 | def __init__( 18 | self, 19 | token: str, 20 | *, 21 | retry: Optional[Union[Literal[False], RetryConfig]] = None, 22 | base_url: Optional[str] = None, 23 | ) -> None: 24 | """ 25 | :param token: The authorization token from the Upstash console. 26 | :param retry: Configures how the client should retry requests. 27 | """ 28 | self.http = HttpClient( 29 | token, 30 | retry, 31 | base_url or environ.get("QSTASH_URL"), 32 | ) 33 | self.message = MessageApi(self.http) 34 | """Message api.""" 35 | 36 | self.url_group = UrlGroupApi(self.http) 37 | """Url group api.""" 38 | 39 | self.queue = QueueApi(self.http) 40 | """Queue api.""" 41 | 42 | self.schedule = ScheduleApi(self.http) 43 | """Schedule api.""" 44 | 45 | self.signing_key = SigningKeyApi(self.http) 46 | """Signing key api.""" 47 | 48 | self.log = LogApi(self.http) 49 | """Log api.""" 50 | 51 | self.dlq = DlqApi(self.http) 52 | """Dlq (Dead Letter Queue) api.""" 53 | -------------------------------------------------------------------------------- /tests/test_receiver.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import hashlib 3 | import json 4 | import time 5 | from typing import Optional 6 | 7 | import jwt 8 | import pytest 9 | 10 | from qstash import Receiver 11 | from qstash.errors import SignatureError 12 | from tests import QSTASH_CURRENT_SIGNING_KEY 13 | 14 | 15 | def get_signature(body: str, key: Optional[str]) -> str: 16 | body_hash = hashlib.sha256(body.encode()).digest() 17 | body_hash_b64 = 
base64.urlsafe_b64encode(body_hash).decode().rstrip("=") 18 | payload = { 19 | "aud": "", 20 | "body": body_hash_b64, 21 | "exp": int(time.time()) + 300, 22 | "iat": int(time.time()), 23 | "iss": "Upstash", 24 | "jti": time.time(), 25 | "nbf": int(time.time()), 26 | "sub": "https://mock.httpstatus.io/200", 27 | } 28 | signature = jwt.encode( 29 | payload, key, algorithm="HS256", headers={"alg": "HS256", "typ": "JWT"} 30 | ) 31 | return signature 32 | 33 | 34 | def test_receiver(receiver: Receiver) -> None: 35 | body = json.dumps({"hello": "world"}) 36 | sig = get_signature(body, QSTASH_CURRENT_SIGNING_KEY) 37 | 38 | receiver.verify( 39 | signature=sig, 40 | body=body, 41 | url="https://mock.httpstatus.io/200", 42 | ) 43 | 44 | 45 | def test_failed_verification(receiver: Receiver) -> None: 46 | body = json.dumps({"hello": "world"}) 47 | sig = get_signature(body, QSTASH_CURRENT_SIGNING_KEY) 48 | 49 | with pytest.raises(SignatureError): 50 | receiver.verify( 51 | signature=sig, 52 | body=body, 53 | url="https://mock.httpstatus.io/201", 54 | ) 55 | -------------------------------------------------------------------------------- /tests/test_queue.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | from qstash import QStash 4 | 5 | 6 | def test_queue( 7 | client: QStash, 8 | cleanup_queue: Callable[[QStash, str], None], 9 | ) -> None: 10 | name = "test_queue" 11 | cleanup_queue(client, name) 12 | 13 | client.queue.upsert(queue=name, parallelism=1) 14 | 15 | queue = client.queue.get(name) 16 | assert queue.name == name 17 | assert queue.parallelism == 1 18 | 19 | client.queue.upsert(queue=name, parallelism=2) 20 | 21 | queue = client.queue.get(name) 22 | assert queue.name == name 23 | assert queue.parallelism == 2 24 | 25 | all_queues = client.queue.list() 26 | assert any(True for q in all_queues if q.name == name) 27 | 28 | client.queue.delete(name) 29 | 30 | all_queues = client.queue.list() 31 | 
assert not any(True for q in all_queues if q.name == name) 32 | 33 | 34 | def test_queue_pause_resume( 35 | client: QStash, 36 | cleanup_queue: Callable[[QStash, str], None], 37 | ) -> None: 38 | name = "test_queue" 39 | cleanup_queue(client, name) 40 | 41 | client.queue.upsert(queue=name) 42 | 43 | queue = client.queue.get(name) 44 | assert queue.paused is False 45 | 46 | client.queue.pause(name) 47 | 48 | queue = client.queue.get(name) 49 | assert queue.paused is True 50 | 51 | client.queue.resume(name) 52 | 53 | queue = client.queue.get(name) 54 | assert queue.paused is False 55 | 56 | client.queue.upsert(name, paused=True) 57 | 58 | queue = client.queue.get(name) 59 | assert queue.paused is True 60 | 61 | client.queue.upsert(name, paused=False) 62 | 63 | queue = client.queue.get(name) 64 | assert queue.paused is False 65 | -------------------------------------------------------------------------------- /qstash/asyncio/client.py: -------------------------------------------------------------------------------- 1 | from os import environ 2 | from typing import Literal, Optional, Union 3 | 4 | from qstash.asyncio.dlq import AsyncDlqApi 5 | from qstash.asyncio.log import AsyncLogApi 6 | from qstash.asyncio.http import AsyncHttpClient 7 | from qstash.asyncio.message import AsyncMessageApi 8 | from qstash.asyncio.queue import AsyncQueueApi 9 | from qstash.asyncio.schedule import AsyncScheduleApi 10 | from qstash.asyncio.signing_key import AsyncSigningKeyApi 11 | from qstash.asyncio.url_group import AsyncUrlGroupApi 12 | from qstash.http import RetryConfig 13 | 14 | 15 | class AsyncQStash: 16 | def __init__( 17 | self, 18 | token: str, 19 | *, 20 | retry: Optional[Union[Literal[False], RetryConfig]] = None, 21 | base_url: Optional[str] = None, 22 | ) -> None: 23 | """ 24 | :param token: The authorization token from the Upstash console. 25 | :param retry: Configures how the client should retry requests. 
26 | """ 27 | self.http = AsyncHttpClient( 28 | token, 29 | retry, 30 | base_url or environ.get("QSTASH_URL"), 31 | ) 32 | self.message = AsyncMessageApi(self.http) 33 | """Message api.""" 34 | 35 | self.url_group = AsyncUrlGroupApi(self.http) 36 | """Url group api.""" 37 | 38 | self.queue = AsyncQueueApi(self.http) 39 | """Queue api.""" 40 | 41 | self.schedule = AsyncScheduleApi(self.http) 42 | """Schedule api.""" 43 | 44 | self.signing_key = AsyncSigningKeyApi(self.http) 45 | """Signing key api.""" 46 | 47 | self.log = AsyncLogApi(self.http) 48 | """Log api.""" 49 | 50 | self.dlq = AsyncDlqApi(self.http) 51 | """Dlq (Dead Letter Queue) api.""" 52 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import time 4 | from typing import Callable, Coroutine 5 | 6 | import dotenv 7 | 8 | dotenv.load_dotenv() 9 | 10 | QSTASH_TOKEN = os.environ["QSTASH_TOKEN"] 11 | QSTASH_CURRENT_SIGNING_KEY = os.environ["QSTASH_CURRENT_SIGNING_KEY"] 12 | QSTASH_NEXT_SIGNING_KEY = os.environ["QSTASH_NEXT_SIGNING_KEY"] 13 | OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY") or "" 14 | 15 | 16 | def assert_eventually( 17 | assertion: Callable[[], None], 18 | initial_delay: float = 0, 19 | retry_delay: float = 0.5, 20 | timeout: float = 10.0, 21 | ) -> None: 22 | if initial_delay > 0: 23 | time.sleep(initial_delay) 24 | 25 | deadline = time.time() + timeout 26 | last_err = None 27 | 28 | while time.time() < deadline: 29 | try: 30 | assertion() 31 | return 32 | except AssertionError as e: 33 | last_err = e 34 | time.sleep(retry_delay) 35 | 36 | if last_err is None: 37 | raise AssertionError("Couldn't run the assertion") 38 | 39 | raise last_err 40 | 41 | 42 | async def assert_eventually_async( 43 | assertion: Callable[[], Coroutine[None, None, None]], 44 | initial_delay: float = 0, 45 | retry_delay: float = 0.5, 46 | timeout: 
@pytest.mark.asyncio
async def test_queue_async(
    async_client: AsyncQStash,
    cleanup_queue_async: Callable[[AsyncQStash, str], None],
) -> None:
    """Create/update a queue, confirm it is listed, then delete it."""
    name = "test_queue"
    cleanup_queue_async(async_client, name)

    # Upserting with a new parallelism value must update the existing queue.
    for parallelism in (1, 2):
        await async_client.queue.upsert(queue=name, parallelism=parallelism)

        queue = await async_client.queue.get(name)
        assert queue.name == name
        assert queue.parallelism == parallelism

    listed = {q.name for q in await async_client.queue.list()}
    assert name in listed

    await async_client.queue.delete(name)

    # After deletion the queue must no longer show up in listings.
    listed = {q.name for q in await async_client.queue.list()}
    assert name not in listed
async_client.queue.get(name) 48 | assert queue.paused is False 49 | 50 | await async_client.queue.pause(name) 51 | 52 | queue = await async_client.queue.get(name) 53 | assert queue.paused is True 54 | 55 | await async_client.queue.resume(name) 56 | 57 | queue = await async_client.queue.get(name) 58 | assert queue.paused is False 59 | 60 | await async_client.queue.upsert(name, paused=True) 61 | 62 | queue = await async_client.queue.get(name) 63 | assert queue.paused is True 64 | 65 | await async_client.queue.upsert(name, paused=False) 66 | 67 | queue = await async_client.queue.get(name) 68 | assert queue.paused is False 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
class AsyncQueueApi:
    """Asynchronous client for the QStash queue endpoints."""

    def __init__(self, http: AsyncHttpClient) -> None:
        self._http = http

    async def upsert(
        self,
        queue: str,
        *,
        parallelism: int = 1,
        paused: bool = False,
    ) -> None:
        """
        Creates the queue if it does not exist yet, or updates it otherwise.

        :param queue: The name of the queue.
        :param parallelism: The number of parallel consumers consuming from the queue.
        :param paused: Whether to pause the queue or not. A paused queue will not
            deliver new messages until it is resumed.
        """
        await self._http.request(
            path="/v2/queues",
            method="POST",
            headers={"Content-Type": "application/json"},
            body=prepare_upsert_body(queue, parallelism, paused),
            parse_response=False,
        )

    async def get(self, queue: str) -> Queue:
        """
        Gets the queue by its name.

        :param queue: The name of the queue.
        """
        return parse_queue_response(
            await self._http.request(
                path=f"/v2/queues/{queue}",
                method="GET",
            )
        )

    async def list(self) -> List[Queue]:
        """
        Lists all the queues.
        """
        all_queues = await self._http.request(
            path="/v2/queues",
            method="GET",
        )

        return [parse_queue_response(q) for q in all_queues]

    async def delete(self, queue: str) -> None:
        """
        Deletes the queue.

        :param queue: The name of the queue.
        """
        await self._http.request(
            path=f"/v2/queues/{queue}",
            method="DELETE",
            parse_response=False,
        )

    async def pause(self, queue: str) -> None:
        """
        Pauses the queue.

        A paused queue will not deliver messages until it is resumed.
        """
        await self._http.request(
            path=f"/v2/queues/{queue}/pause",
            method="POST",
            parse_response=False,
        )

    async def resume(self, queue: str) -> None:
        """
        Resumes the queue so it delivers messages again.
        """
        await self._http.request(
            path=f"/v2/queues/{queue}/resume",
            method="POST",
            parse_response=False,
        )
class AsyncDlqApi:
    """Asynchronous client for inspecting and pruning the dead letter queue."""

    def __init__(self, http: AsyncHttpClient) -> None:
        self._http = http

    async def get(self, dlq_id: str) -> DlqMessage:
        """
        Gets a message from DLQ.

        :param dlq_id: The unique id within the DLQ to get.
        """
        raw = await self._http.request(
            path=f"/v2/dlq/{dlq_id}",
            method="GET",
        )

        return parse_dlq_message_response(raw, dlq_id)

    async def list(
        self,
        *,
        cursor: Optional[str] = None,
        count: Optional[int] = None,
        filter: Optional[DlqFilter] = None,
    ) -> ListDlqMessagesResponse:
        """
        Lists all messages currently inside the DLQ.

        :param cursor: Optional cursor to start listing DLQ messages from.
        :param count: The maximum number of DLQ messages to return.
            Default and max is `100`.
        :param filter: Filter to use.
        """
        raw = await self._http.request(
            path="/v2/dlq",
            method="GET",
            params=prepare_list_dlq_messages_params(
                cursor=cursor,
                count=count,
                filter=filter,
            ),
        )

        return ListDlqMessagesResponse(
            # Cursor is absent on the last page.
            cursor=raw.get("cursor"),
            messages=[parse_dlq_message_response(m) for m in raw["messages"]],
        )

    async def delete(self, dlq_id: str) -> None:
        """
        Deletes a message from the DLQ.

        :param dlq_id: The unique id within the DLQ to delete.
        """
        await self._http.request(
            path=f"/v2/dlq/{dlq_id}",
            method="DELETE",
            parse_response=False,
        )

    async def delete_many(self, dlq_ids: List[str]) -> int:
        """
        Deletes multiple messages from the DLQ and
        returns how many of them are deleted.

        :param dlq_ids: The unique ids within the DLQ to delete.
        """
        response = await self._http.request(
            path="/v2/dlq",
            method="DELETE",
            headers={"Content-Type": "application/json"},
            body=json.dumps({"dlqIds": dlq_ids}),
        )

        return response["deleted"]  # type:ignore[no-any-return]
def test_dlq_filter(client: QStash) -> None:
    """Publish a failing message and ensure DLQ filtering by message id finds it."""
    res = client.message.publish_json(
        method="GET",
        url="https://mock.httpstatus.io/404",
        retries=0,
        retry_delay="7000 * retried",
    )

    assert isinstance(res, PublishResponse)

    def assertion() -> None:
        page = client.dlq.list(
            filter={"message_id": res.message_id},
            count=1,
        )
        matches = page.messages

        assert len(matches) == 1
        only = matches[0]
        assert only.message_id == res.message_id
        assert only.retry_delay_expression == "7000 * retried"

        # Clean up so later runs don't observe this message.
        client.dlq.delete(only.dlq_id)

    # DLQ delivery is asynchronous; poll until the message shows up.
    assert_eventually(
        assertion,
        initial_delay=2.0,
        retry_delay=1.0,
        timeout=10.0,
    )
@pytest.mark.asyncio
async def test_dlq_get_and_delete_async(async_client: AsyncQStash) -> None:
    """Publish to a failing endpoint and verify the message lands in the DLQ."""
    published = await async_client.message.publish_json(
        method="GET",
        url="https://mock.httpstatus.io/404",
        retries=0,
    )

    assert isinstance(published, PublishResponse)

    # The shared helper polls the DLQ, checks the failure, and deletes the entry.
    await assert_failed_eventually_async(async_client, published.message_id)
@pytest.fixture
def cleanup_queue(request: pytest.FixtureRequest) -> Callable[[QStash, str], None]:
    """Fixture yielding a registrar: each registered (client, queue name) pair
    is deleted during test teardown, so tests leave no queues behind."""
    queue_names = []

    def register(client: QStash, queue_name: str) -> None:
        # Record the pair; actual deletion is deferred to the finalizer.
        queue_names.append((client, queue_name))

    def delete() -> None:
        for client, queue_name in queue_names:
            try:
                client.queue.delete(queue_name)
            except Exception:
                # Best-effort cleanup: the queue may already be gone
                # (or was never created by the test).
                pass

    request.addfinalizer(delete)

    return register
def verify_with_key(
    key: str,
    *,
    signature: str,
    body: str,
    url: Optional[str] = None,
    clock_tolerance: int = 0,
) -> None:
    """Verify a QStash request signature JWT against a single signing key.

    :param key: The signing key to validate the HS256 signature with.
    :param signature: The JWT taken from the `Upstash-Signature` header.
    :param body: The raw request body; its SHA-256 must match the token's
        `body` claim.
    :param url: If given, must equal the token's `sub` claim.
    :param clock_tolerance: Leeway in seconds for `nbf`/`exp` checks.
    :raises SignatureError: If decoding fails, the token expired, or the
        subject/body claims do not match.
    """
    try:
        claims = jwt.decode(
            signature,
            key,
            algorithms=["HS256"],
            issuer="Upstash",
            options={
                "require": ["iss", "sub", "exp", "nbf"],
                "leeway": clock_tolerance,
            },
        )
    except jwt.ExpiredSignatureError:
        raise SignatureError("Signature has expired")
    except Exception as e:
        # Any other decode problem (bad signature, missing claims, ...) is
        # surfaced uniformly as a SignatureError.
        raise SignatureError(f"Error while decoding signature: {e}")

    if url is not None and claims["sub"] != url:
        raise SignatureError(f"Invalid subject: {claims['sub']}, want: {url}")

    # The `body` claim carries the URL-safe base64 SHA-256 of the body;
    # compare with padding stripped on both sides.
    digest = hashlib.sha256(body.encode()).digest()
    expected = base64.urlsafe_b64encode(digest).decode().rstrip("=")

    if claims["body"].rstrip("=") != expected:
        raise SignatureError(
            f"Invalid body hash: {claims['body']}, want: {expected}"
        )
@dataclasses.dataclass
class Queue:
    # Name of the queue.
    name: str

    # Number of parallel consumers consuming from the queue.
    parallelism: int

    # Creation time of the queue, in Unix milliseconds.
    created_at: int

    # Last update time of the queue, in Unix milliseconds.
    updated_at: int

    # Number of unprocessed messages that exist in the queue.
    lag: int

    # Whether the queue is paused or not.
    paused: bool


def prepare_upsert_body(queue: str, parallelism: int, paused: bool) -> str:
    """Serialize queue upsert parameters into the JSON body the API expects."""
    payload = {
        "queueName": queue,
        "parallelism": parallelism,
        "paused": paused,
    }
    return json.dumps(payload)


def parse_queue_response(response: Dict[str, Any]) -> Queue:
    """Convert an API queue object (camelCase keys) into a `Queue` dataclass."""
    return Queue(
        response["name"],
        response["parallelism"],
        response["createdAt"],
        response["updatedAt"],
        response["lag"],
        response["paused"],
    )
upsert( 55 | self, 56 | queue: str, 57 | *, 58 | parallelism: int = 1, 59 | paused: bool = False, 60 | ) -> None: 61 | """ 62 | Updates or creates a queue. 63 | 64 | :param queue: The name of the queue. 65 | :param parallelism: The number of parallel consumers consuming from the queue. 66 | :param paused: Whether to pause the queue or not. A paused queue will not 67 | deliver new messages until it is resumed. 68 | """ 69 | body = prepare_upsert_body(queue, parallelism, paused) 70 | 71 | self._http.request( 72 | path="/v2/queues", 73 | method="POST", 74 | headers={"Content-Type": "application/json"}, 75 | body=body, 76 | parse_response=False, 77 | ) 78 | 79 | def get(self, queue: str) -> Queue: 80 | """ 81 | Gets the queue by its name. 82 | """ 83 | response = self._http.request( 84 | path=f"/v2/queues/{queue}", 85 | method="GET", 86 | ) 87 | 88 | return parse_queue_response(response) 89 | 90 | def list(self) -> List[Queue]: 91 | """ 92 | Lists all the queues. 93 | """ 94 | response = self._http.request( 95 | path="/v2/queues", 96 | method="GET", 97 | ) 98 | 99 | return [parse_queue_response(r) for r in response] 100 | 101 | def delete(self, queue: str) -> None: 102 | """ 103 | Deletes the queue. 104 | """ 105 | self._http.request( 106 | path=f"/v2/queues/{queue}", 107 | method="DELETE", 108 | parse_response=False, 109 | ) 110 | 111 | def pause(self, queue: str) -> None: 112 | """ 113 | Pauses the queue. 114 | 115 | A paused queue will not deliver messages until 116 | it is resumed. 117 | """ 118 | self._http.request( 119 | path=f"/v2/queues/{queue}/pause", 120 | method="POST", 121 | parse_response=False, 122 | ) 123 | 124 | def resume(self, queue: str) -> None: 125 | """ 126 | Resumes the queue. 
class AsyncHttpClient:
    """Async HTTP layer for the QStash API: adds the Bearer token and retries
    failed transport attempts with the configured backoff."""

    def __init__(
        self,
        token: str,
        retry: Optional[Union[Literal[False], RetryConfig]],
        base_url: Optional[str] = None,
    ) -> None:
        """
        :param token: QStash token, sent as a `Bearer` Authorization header.
        :param retry: `None` for the default retry policy, `False` to disable
            retries, or an explicit configuration.
        :param base_url: Base URL of the QStash API; falls back to the default
            public endpoint when not provided.
        """
        self._token = f"Bearer {token}"

        if retry is None:
            self._retry = DEFAULT_RETRY
        elif retry is False:
            self._retry = NO_RETRY
        else:
            self._retry = retry

        self._client = httpx.AsyncClient(
            timeout=DEFAULT_TIMEOUT,
        )

        self._base_url = base_url.rstrip("/") if base_url else BASE_URL

    async def request(
        self,
        *,
        path: str,
        method: HttpMethod,
        headers: Optional[Dict[str, str]] = None,
        body: Optional[Union[str, bytes]] = None,
        params: Optional[Dict[str, str]] = None,
        parse_response: bool = True,
        base_url: Optional[str] = None,
        token: Optional[str] = None,
    ) -> Any:
        """
        Performs a request, retrying transport errors per the retry config.

        :param path: Path appended to the base URL.
        :param method: HTTP method to use.
        :param headers: Extra headers; the Authorization header is always set.
        :param body: Raw request body, if any.
        :param params: Query string parameters, if any.
        :param parse_response: When `True`, return the parsed JSON body;
            otherwise return the raw response text.
        :param base_url: Overrides the client-level base URL for this request.
        :param token: Overrides the client-level token for this request.
        """
        base_url = base_url or self._base_url
        token = token or self._token

        url = base_url + path
        headers = {"Authorization": token, **(headers or {})}

        max_attempts = 1 + max(0, self._retry["retries"])
        last_error = None
        response = None
        for attempt in range(max_attempts):
            try:
                response = await self._client.request(
                    method=method,
                    url=url,
                    params=params,
                    headers=headers,
                    content=body,
                )
                break  # Break the loop as soon as we receive a proper response
            except Exception as e:
                last_error = e
                # Only back off when another attempt remains; sleeping after
                # the final failure would just delay the raised error.
                if attempt + 1 < max_attempts:
                    await asyncio.sleep(self._retry["backoff"](attempt) / 1000)

        # Identity check: a received response must be inspected even if a
        # future httpx version gave Response a status-based __bool__.
        if response is None:
            # Every attempt raised, so last_error can't be None at this point.
            raise last_error  # type:ignore[misc]

        raise_for_non_ok_status(response)

        if parse_response:
            return response.json()

        return response.text

    async def stream(
        self,
        *,
        path: str,
        method: HttpMethod,
        headers: Optional[Dict[str, str]] = None,
        body: Optional[Union[str, bytes]] = None,
        params: Optional[Dict[str, str]] = None,
        base_url: Optional[str] = None,
        token: Optional[str] = None,
    ) -> httpx.Response:
        """
        Performs a request and returns the streaming response without reading
        its body. The caller is responsible for closing the returned response.

        Retries transport errors per the retry config, like `request`.
        """
        base_url = base_url or self._base_url
        token = token or self._token

        url = base_url + path
        headers = {"Authorization": token, **(headers or {})}

        max_attempts = 1 + max(0, self._retry["retries"])
        last_error = None
        response = None
        for attempt in range(max_attempts):
            try:
                request = self._client.build_request(
                    method=method,
                    url=url,
                    params=params,
                    headers=headers,
                    content=body,
                )
                response = await self._client.send(
                    request,
                    stream=True,
                )
                break  # Break the loop as soon as we receive a proper response
            except Exception as e:
                last_error = e
                # Only back off when another attempt remains (see `request`).
                if attempt + 1 < max_attempts:
                    await asyncio.sleep(self._retry["backoff"](attempt) / 1000)

        if response is None:
            # Every attempt raised, so last_error can't be None at this point.
            raise last_error  # type:ignore[misc]

        try:
            raise_for_non_ok_status(response)
        except Exception as e:
            # The caller never sees this response, so close it here to
            # release the underlying connection.
            await response.aclose()
            raise e

        return response
> **This project is in GA Stage.** 5 | > 6 | > The Upstash Professional Support fully covers this project. It receives regular updates, and bug fixes. 7 | > The Upstash team is committed to maintaining and improving its functionality. 8 | 9 | **QStash** is an HTTP based messaging and scheduling solution for serverless and edge runtimes. 10 | 11 | [QStash Documentation](https://upstash.com/docs/qstash) 12 | 13 | ### Install 14 | 15 | ```shell 16 | pip install qstash 17 | ``` 18 | 19 | ### Usage 20 | 21 | You can get your QStash token from the [Upstash Console](https://console.upstash.com/qstash). 22 | 23 | #### Publish a JSON message 24 | 25 | ```python 26 | from qstash import QStash 27 | 28 | client = QStash("") 29 | 30 | res = client.message.publish_json( 31 | url="https://example.com", 32 | body={"hello": "world"}, 33 | headers={ 34 | "test-header": "test-value", 35 | }, 36 | ) 37 | 38 | print(res.message_id) 39 | ``` 40 | 41 | #### [Create a scheduled message](https://upstash.com/docs/qstash/features/schedules) 42 | 43 | ```python 44 | from qstash import QStash 45 | 46 | client = QStash("") 47 | 48 | schedule_id = client.schedule.create( 49 | destination="https://example.com", 50 | cron="*/5 * * * *", 51 | ) 52 | 53 | print(schedule_id) 54 | ``` 55 | 56 | #### [Receiving messages](https://upstash.com/docs/qstash/howto/receiving) 57 | 58 | ```python 59 | from qstash import Receiver 60 | 61 | # Keys available from the QStash console 62 | receiver = Receiver( 63 | current_signing_key="CURRENT_SIGNING_KEY", 64 | next_signing_key="NEXT_SIGNING_KEY", 65 | ) 66 | 67 | # ... 
in your request handler 68 | 69 | signature, body = req.headers["Upstash-Signature"], req.body 70 | 71 | receiver.verify( 72 | body=body, 73 | signature=signature, 74 | url="https://example.com", # Optional 75 | ) 76 | ``` 77 | 78 | #### Publish a JSON message to LLM Using Custom Providers 79 | 80 | ```python 81 | from qstash import QStash 82 | from qstash.chat import openai 83 | 84 | client = QStash("") 85 | 86 | res = client.message.publish_json( 87 | api={"name": "llm", "provider": openai("")}, 88 | body={ 89 | "model": "gpt-3.5-turbo", 90 | "messages": [ 91 | { 92 | "role": "user", 93 | "content": "What is the capital of Turkey?", 94 | } 95 | ], 96 | }, 97 | callback="https://example-cb.com", 98 | ) 99 | 100 | print(res.message_id) 101 | ``` 102 | 103 | #### Additional configuration 104 | 105 | ```python 106 | from qstash import QStash 107 | 108 | # Create a client with a custom retry configuration. This is 109 | # for sending messages to QStash, not for sending messages to 110 | # your endpoints. 
111 | # The default configuration is: 112 | # { 113 | # "retries": 5, 114 | # "backoff": lambda retry_count: math.exp(retry_count) * 50, 115 | # } 116 | client = QStash( 117 | token="", 118 | retry={ 119 | "retries": 1, 120 | "backoff": lambda retry_count: (2 ** retry_count) * 20, 121 | }, 122 | ) 123 | 124 | # Publish to URL 125 | client.message.publish_json( 126 | url="https://example.com", 127 | body={"key": "value"}, 128 | # Retry sending message to API 3 times 129 | # https://upstash.com/docs/qstash/features/retry 130 | retries=3, 131 | # Schedule message to be sent 4 seconds from now 132 | delay="4s", 133 | # When message is sent, send a request to this URL 134 | # https://upstash.com/docs/qstash/features/callbacks 135 | callback="https://example.com/callback", 136 | # When message fails to send, send a request to this URL 137 | failure_callback="https://example.com/failure_callback", 138 | # Headers to forward to the endpoint 139 | headers={ 140 | "test-header": "test-value", 141 | }, 142 | # Enable content-based deduplication 143 | # https://upstash.com/docs/qstash/features/deduplication#content-based-deduplication 144 | content_based_deduplication=True, 145 | ) 146 | ``` 147 | 148 | Additional methods are available for managing url groups, schedules, and messages. See the examples folder for more. 149 | 150 | ### Development 151 | 152 | 1. Clone the repository 153 | 2. Install [Poetry](https://python-poetry.org/docs/#installation) 154 | 3. Install dependencies with `poetry install` 155 | 4. Create a .env file with `cp .env.example .env` and fill in the `QSTASH_TOKEN` 156 | 5. Run tests with `poetry run pytest` 157 | 6. 
def prepare_add_endpoints_body(
    endpoints: List[UpsertEndpointRequest],
) -> str:
    """
    Serializes the given endpoints into the JSON request body,
    validating that every endpoint specifies a url.
    """
    if any("url" not in endpoint for endpoint in endpoints):
        raise QStashError("`url` of the endpoint must be provided.")

    return json.dumps({"endpoints": endpoints})
def parse_url_group_response(response: Dict[str, Any]) -> UrlGroup:
    """Converts a raw url group API response into a `UrlGroup`."""
    parsed_endpoints = [
        Endpoint(url=endpoint["url"], name=endpoint.get("name"))
        for endpoint in response["endpoints"]
    ]

    return UrlGroup(
        name=response["name"],
        created_at=response["createdAt"],
        updated_at=response["updatedAt"],
        endpoints=parsed_endpoints,
    )
145 | """ 146 | response = self._http.request( 147 | path=f"/v2/topics/{url_group}", 148 | method="GET", 149 | ) 150 | 151 | return parse_url_group_response(response) 152 | 153 | def list(self) -> List[UrlGroup]: 154 | """ 155 | Lists all the url groups. 156 | """ 157 | response = self._http.request( 158 | path="/v2/topics", 159 | method="GET", 160 | ) 161 | 162 | return [parse_url_group_response(r) for r in response] 163 | 164 | def delete(self, url_group: str) -> None: 165 | """ 166 | Deletes the url group and all its endpoints. 167 | """ 168 | self._http.request( 169 | path=f"/v2/topics/{url_group}", 170 | method="DELETE", 171 | parse_response=False, 172 | ) 173 | -------------------------------------------------------------------------------- /tests/test_schedules.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import pytest 4 | 5 | from qstash import QStash 6 | from qstash.message import FlowControl 7 | 8 | 9 | @pytest.fixture 10 | def cleanup_schedule(request: pytest.FixtureRequest) -> Callable[[QStash, str], None]: 11 | schedule_ids = [] 12 | 13 | def register(client: QStash, schedule_id: str) -> None: 14 | schedule_ids.append((client, schedule_id)) 15 | 16 | def delete() -> None: 17 | for client, schedule_id in schedule_ids: 18 | try: 19 | client.schedule.delete(schedule_id) 20 | except Exception: 21 | pass 22 | 23 | request.addfinalizer(delete) 24 | 25 | return register 26 | 27 | 28 | def test_schedule_lifecycle( 29 | client: QStash, 30 | cleanup_schedule: Callable[[QStash, str], None], 31 | ) -> None: 32 | schedule_id = client.schedule.create_json( 33 | cron="1 1 1 1 1", 34 | destination="https://mock.httpstatus.io/200", 35 | body={"ex_key": "ex_value"}, 36 | retry_delay="5000 * retried", 37 | ) 38 | 39 | cleanup_schedule(client, schedule_id) 40 | 41 | assert len(schedule_id) > 0 42 | 43 | res = client.schedule.get(schedule_id) 44 | assert res.schedule_id == schedule_id 45 | 
assert res.cron == "1 1 1 1 1" 46 | assert res.retry_delay_expression == "5000 * retried" 47 | 48 | list_res = client.schedule.list() 49 | assert any(s.schedule_id == schedule_id for s in list_res) 50 | 51 | client.schedule.delete(schedule_id) 52 | 53 | list_res = client.schedule.list() 54 | assert not any(s.schedule_id == schedule_id for s in list_res) 55 | 56 | 57 | def test_schedule_pause_resume( 58 | client: QStash, 59 | cleanup_schedule: Callable[[QStash, str], None], 60 | ) -> None: 61 | schedule_id = client.schedule.create_json( 62 | cron="1 1 1 1 1", 63 | destination="https://mock.httpstatus.io/200", 64 | body={"ex_key": "ex_value"}, 65 | ) 66 | 67 | cleanup_schedule(client, schedule_id) 68 | 69 | assert len(schedule_id) > 0 70 | 71 | res = client.schedule.get(schedule_id) 72 | assert res.schedule_id == schedule_id 73 | assert res.cron == "1 1 1 1 1" 74 | assert res.paused is False 75 | 76 | client.schedule.pause(schedule_id) 77 | 78 | res = client.schedule.get(schedule_id) 79 | assert res.paused is True 80 | 81 | client.schedule.resume(schedule_id) 82 | 83 | res = client.schedule.get(schedule_id) 84 | assert res.paused is False 85 | 86 | 87 | def test_schedule_with_flow_control( 88 | client: QStash, cleanup_schedule: Callable[[QStash, str], None] 89 | ) -> None: 90 | schedule_id = client.schedule.create_json( 91 | cron="1 1 1 1 1", 92 | destination="https://mock.httpstatus.io/200", 93 | body={"ex_key": "ex_value"}, 94 | flow_control=FlowControl(key="flow-key", parallelism=2), 95 | ) 96 | cleanup_schedule(client, schedule_id) 97 | 98 | schedule = client.schedule.get(schedule_id) 99 | 100 | flow_control = schedule.flow_control 101 | assert flow_control is not None 102 | assert flow_control.key == "flow-key" 103 | assert flow_control.parallelism == 2 104 | assert flow_control.rate is None 105 | assert flow_control.period == 1 106 | 107 | 108 | def test_schedule_enqueue( 109 | client: QStash, 110 | cleanup_schedule: Callable[[QStash, str], None], 111 | ) -> 
def test_schedule_with_label(
    client: QStash,
    cleanup_schedule: Callable[[QStash, str], None],
) -> None:
    """Verifies that a label passed at creation is returned on the schedule."""
    schedule_id = client.schedule.create(
        cron="*/5 * * * *",
        destination="https://mock.httpstatus.io/200",
        body="test-schedule-with-label",
        label="test-schedule-label",
    )
    cleanup_schedule(client, schedule_id)

    assert schedule_id

    schedule = client.schedule.get(schedule_id)
    assert schedule.schedule_id == schedule_id
    assert schedule.label == "test-schedule-label"
@pytest.mark.asyncio
async def test_schedule_lifecycle_async(
    async_client: AsyncQStash,
    cleanup_schedule_async: Callable[[AsyncQStash, str], None],
) -> None:
    """Creates a schedule, verifies it is listed, then deletes it."""
    schedule_id = await async_client.schedule.create_json(
        cron="1 1 1 1 1",
        destination="https://mock.httpstatus.io/200",
        body={"ex_key": "ex_value"},
    )
    cleanup_schedule_async(async_client, schedule_id)

    assert schedule_id

    created = await async_client.schedule.get(schedule_id)
    assert created.schedule_id == schedule_id
    assert created.cron == "1 1 1 1 1"

    schedules = await async_client.schedule.list()
    assert any(s.schedule_id == schedule_id for s in schedules)

    await async_client.schedule.delete(schedule_id)

    schedules = await async_client.schedule.list()
    assert all(s.schedule_id != schedule_id for s in schedules)
@pytest.mark.asyncio
async def test_schedule_with_label_async(
    async_client: AsyncQStash,
    cleanup_schedule_async: Callable[[AsyncQStash, str], None],
) -> None:
    """Verifies that a label passed at creation is returned on the schedule."""
    schedule_id = await async_client.schedule.create(
        cron="*/5 * * * *",
        destination="https://mock.httpstatus.io/200",
        body="test-async-schedule-with-label",
        label="test-async-schedule-label",
    )
    cleanup_schedule_async(async_client, schedule_id)

    assert schedule_id

    schedule = await async_client.schedule.get(schedule_id)
    assert schedule.schedule_id == schedule_id
    assert schedule.label == "test-async-schedule-label"
@pytest.mark.asyncio
async def test_schedule_json_with_label_async(
    async_client: AsyncQStash,
    cleanup_schedule_async: Callable[[AsyncQStash, str], None],
) -> None:
    """Verifies that a label set via create_json is returned on the schedule."""
    schedule_id = await async_client.schedule.create_json(
        cron="*/10 * * * *",
        destination="https://mock.httpstatus.io/200",
        body={"schedule": "async-json-test", "with_label": True},
        label="test-async-schedule-json-label",
    )
    cleanup_schedule_async(async_client, schedule_id)

    assert schedule_id

    schedule = await async_client.schedule.get(schedule_id)
    assert schedule.schedule_id == schedule_id
    assert schedule.label == "test-async-schedule-json-label"
def raise_for_non_ok_status(response: httpx.Response) -> None:
    """
    Raises an appropriate error for non-successful responses.

    429 responses are mapped to rate limit errors: the daily message limit
    error when the `RateLimit-Limit` header is present, and the burst rate
    limit error otherwise. All other failures raise a generic `QStashError`.
    """
    if response.is_success:
        return

    if response.status_code != 429:
        raise QStashError(
            f"Request failed with status: {response.status_code}, body: {response.text}"
        )

    if "RateLimit-Limit" in response.headers:
        raise daily_message_limit_error(response.headers)

    raise burst_rate_limit_error(response.headers)
base_url or self._base_url 115 | token = token or self._token 116 | 117 | url = base_url + path 118 | headers = {"Authorization": token, **(headers or {})} 119 | 120 | max_attempts = 1 + max(0, self._retry["retries"]) 121 | last_error = None 122 | response = None 123 | for attempt in range(max_attempts): 124 | try: 125 | response = self._client.request( 126 | method=method, 127 | url=url, 128 | params=params, 129 | headers=headers, 130 | content=body, 131 | ) 132 | break # Break the loop as soon as we receive a proper response 133 | except Exception as e: 134 | last_error = e 135 | backoff = self._retry["backoff"](attempt) / 1000 136 | time.sleep(backoff) 137 | 138 | if not response: 139 | # Can't be None at this point 140 | raise last_error # type:ignore[misc] 141 | 142 | raise_for_non_ok_status(response) 143 | 144 | if parse_response: 145 | return response.json() 146 | 147 | return response.text 148 | 149 | def stream( 150 | self, 151 | *, 152 | path: str, 153 | method: HttpMethod, 154 | headers: Optional[Dict[str, str]] = None, 155 | body: Optional[Union[str, bytes]] = None, 156 | params: Optional[Dict[str, str]] = None, 157 | base_url: Optional[str] = None, 158 | token: Optional[str] = None, 159 | ) -> httpx.Response: 160 | base_url = base_url or self._base_url 161 | token = token or self._token 162 | 163 | url = base_url + path 164 | headers = {"Authorization": token, **(headers or {})} 165 | 166 | max_attempts = 1 + max(0, self._retry["retries"]) 167 | last_error = None 168 | response = None 169 | for attempt in range(max_attempts): 170 | try: 171 | request = self._client.build_request( 172 | method=method, 173 | url=url, 174 | params=params, 175 | headers=headers, 176 | content=body, 177 | ) 178 | response = self._client.send( 179 | request, 180 | stream=True, 181 | ) 182 | break # Break the loop as soon as we receive a proper response 183 | except Exception as e: 184 | last_error = e 185 | backoff = self._retry["backoff"](attempt) / 1000 186 | 
time.sleep(backoff) 187 | 188 | if not response: 189 | # Can't be None at this point 190 | raise last_error # type:ignore[misc] 191 | 192 | try: 193 | raise_for_non_ok_status(response) 194 | except Exception as e: 195 | response.close() 196 | raise e 197 | 198 | return response 199 | -------------------------------------------------------------------------------- /qstash/dlq.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import json 3 | from typing import Any, Dict, List, Optional, TypedDict 4 | 5 | from qstash.http import HttpClient 6 | from qstash.message import Message, parse_flow_control 7 | 8 | 9 | @dataclasses.dataclass 10 | class DlqMessage(Message): 11 | dlq_id: str 12 | """Unique id within the DLQ.""" 13 | 14 | response_status: int 15 | """HTTP status code of the last failed delivery attempt.""" 16 | 17 | response_headers: Optional[Dict[str, List[str]]] 18 | """Response headers of the last failed delivery attempt.""" 19 | 20 | response_body: Optional[str] 21 | """ 22 | Response body of the last failed delivery attempt if it is 23 | composed of UTF-8 characters only. 24 | """ 25 | 26 | response_body_base64: Optional[str] 27 | """ 28 | Base64 encoded response body of the last failed delivery attempt 29 | if the response body contains non-UTF-8 characters. 30 | """ 31 | 32 | retry_delay_expression: Optional[str] 33 | """ 34 | The retry delay expression for this DLQ message, 35 | if retry_delay was set when publishing the message. 
36 | """ 37 | 38 | 39 | class DlqFilter(TypedDict, total=False): 40 | message_id: str 41 | """Filter DLQ entries by message id.""" 42 | 43 | url: str 44 | """Filter DLQ entries by url.""" 45 | 46 | url_group: str 47 | """Filter DLQ entries by url group name.""" 48 | 49 | queue: str 50 | """Filter DLQ entries by queue name.""" 51 | 52 | schedule_id: str 53 | """Filter DLQ entries by schedule id.""" 54 | 55 | from_time: int 56 | """Filter DLQ entries by starting Unix time, in milliseconds""" 57 | 58 | to_time: int 59 | """Filter DLQ entries by ending Unix time, in milliseconds""" 60 | 61 | response_status: int 62 | """Filter DLQ entries by HTTP status of the response""" 63 | 64 | caller_ip: str 65 | """Filter DLQ entries by IP address of the publisher of the message""" 66 | 67 | label: str 68 | """Filter DLQ entries by label.""" 69 | 70 | 71 | @dataclasses.dataclass 72 | class ListDlqMessagesResponse: 73 | cursor: Optional[str] 74 | """ 75 | A cursor which can be used in subsequent requests to paginate through 76 | all messages. If `None`, end of the DLQ messages are reached. 
77 | """ 78 | 79 | messages: List[DlqMessage] 80 | """List of DLQ messages.""" 81 | 82 | 83 | def parse_dlq_message_response( 84 | response: Dict[str, Any], 85 | dlq_id: str = "", 86 | ) -> DlqMessage: 87 | flow_control = parse_flow_control(response) 88 | return DlqMessage( 89 | message_id=response["messageId"], 90 | url=response["url"], 91 | url_group=response.get("topicName"), 92 | endpoint=response.get("endpointName"), 93 | queue=response.get("queueName"), 94 | body=response.get("body"), 95 | body_base64=response.get("bodyBase64"), 96 | method=response["method"], 97 | headers=response.get("header"), 98 | callback_headers=response.get("callbackHeader"), 99 | failure_callback_headers=response.get("failureCallbackHeader"), 100 | max_retries=response["maxRetries"], 101 | not_before=response["notBefore"], 102 | created_at=response["createdAt"], 103 | callback=response.get("callback"), 104 | failure_callback=response.get("failureCallback"), 105 | schedule_id=response.get("scheduleId"), 106 | caller_ip=response.get("callerIP"), 107 | dlq_id=response.get("dlqId", dlq_id), 108 | response_status=response["responseStatus"], 109 | response_headers=response.get("responseHeader"), 110 | response_body=response.get("responseBody"), 111 | response_body_base64=response.get("responseBodyBase64"), 112 | flow_control=flow_control, 113 | retry_delay_expression=response.get("retryDelayExpression"), 114 | label=response.get("label"), 115 | ) 116 | 117 | 118 | def prepare_list_dlq_messages_params( 119 | *, 120 | cursor: Optional[str], 121 | count: Optional[int], 122 | filter: Optional[DlqFilter], 123 | ) -> Dict[str, str]: 124 | params = {} 125 | 126 | if cursor is not None: 127 | params["cursor"] = cursor 128 | 129 | if count is not None: 130 | params["count"] = str(count) 131 | 132 | if filter is not None: 133 | if "message_id" in filter: 134 | params["messageId"] = filter["message_id"] 135 | 136 | if "url" in filter: 137 | params["url"] = filter["url"] 138 | 139 | if "url_group" in 
filter: 140 | params["topicName"] = filter["url_group"] 141 | 142 | if "queue" in filter: 143 | params["queueName"] = filter["queue"] 144 | 145 | if "schedule_id" in filter: 146 | params["scheduleId"] = filter["schedule_id"] 147 | 148 | if "from_time" in filter: 149 | params["fromDate"] = str(filter["from_time"]) 150 | 151 | if "to_time" in filter: 152 | params["toDate"] = str(filter["to_time"]) 153 | 154 | if "response_status" in filter: 155 | params["responseStatus"] = str(filter["response_status"]) 156 | 157 | if "caller_ip" in filter: 158 | params["callerIp"] = filter["caller_ip"] 159 | 160 | if "label" in filter: 161 | params["label"] = filter["label"] 162 | 163 | return params 164 | 165 | 166 | class DlqApi: 167 | def __init__(self, http: HttpClient) -> None: 168 | self._http = http 169 | 170 | def get(self, dlq_id: str) -> DlqMessage: 171 | """ 172 | Gets a message from DLQ. 173 | 174 | :param dlq_id: The unique id within the DLQ to get. 175 | """ 176 | response = self._http.request( 177 | path=f"/v2/dlq/{dlq_id}", 178 | method="GET", 179 | ) 180 | 181 | return parse_dlq_message_response(response, dlq_id) 182 | 183 | def list( 184 | self, 185 | *, 186 | cursor: Optional[str] = None, 187 | count: Optional[int] = None, 188 | filter: Optional[DlqFilter] = None, 189 | ) -> ListDlqMessagesResponse: 190 | """ 191 | Lists all messages currently inside the DLQ. 192 | 193 | :param cursor: Optional cursor to start listing DLQ messages from. 194 | :param count: The maximum number of DLQ messages to return. 195 | Default and max is `100`. 196 | :param filter: Filter to use. 
197 | """ 198 | params = prepare_list_dlq_messages_params( 199 | cursor=cursor, 200 | count=count, 201 | filter=filter, 202 | ) 203 | 204 | response = self._http.request( 205 | path="/v2/dlq", 206 | method="GET", 207 | params=params, 208 | ) 209 | 210 | messages = [parse_dlq_message_response(r) for r in response["messages"]] 211 | 212 | return ListDlqMessagesResponse( 213 | cursor=response.get("cursor"), 214 | messages=messages, 215 | ) 216 | 217 | def delete(self, dlq_id: str) -> None: 218 | """ 219 | Deletes a message from the DLQ. 220 | 221 | :param dlq_id: The unique id within the DLQ to delete. 222 | """ 223 | self._http.request( 224 | path=f"/v2/dlq/{dlq_id}", 225 | method="DELETE", 226 | parse_response=False, 227 | ) 228 | 229 | def delete_many(self, dlq_ids: List[str]) -> int: 230 | """ 231 | Deletes multiple messages from the DLQ and 232 | returns how many of them are deleted. 233 | 234 | :param dlq_ids: The unique ids within the DLQ to delete. 235 | """ 236 | body = json.dumps({"dlqIds": dlq_ids}) 237 | 238 | response = self._http.request( 239 | path="/v2/dlq", 240 | method="DELETE", 241 | headers={"Content-Type": "application/json"}, 242 | body=body, 243 | ) 244 | 245 | return response["deleted"] # type:ignore[no-any-return] 246 | -------------------------------------------------------------------------------- /qstash/log.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import enum 3 | from typing import Any, Dict, List, Optional, TypedDict 4 | 5 | from qstash.http import HttpClient, HttpMethod 6 | from qstash.message import parse_flow_control, FlowControlProperties 7 | 8 | 9 | class LogState(enum.Enum): 10 | """State of the message.""" 11 | 12 | CREATED = "CREATED" 13 | """Message has been accepted and stored in QStash""" 14 | 15 | ACTIVE = "ACTIVE" 16 | """Task is currently being processed by a worker.""" 17 | 18 | RETRY = "RETRY" 19 | """Task has been scheduled to retry.""" 20 | 21 | ERROR 
= "ERROR" 22 | """ 23 | Execution threw an error and the task is waiting to be retried 24 | or failed. 25 | """ 26 | 27 | DELIVERED = "DELIVERED" 28 | """Message was successfully delivered.""" 29 | 30 | FAILED = "FAILED" 31 | """ 32 | Task has failed too many times or encountered an error that it 33 | cannot recover from. 34 | """ 35 | 36 | CANCEL_REQUESTED = "CANCEL_REQUESTED" 37 | """Cancel request from the user is recorded.""" 38 | 39 | CANCELED = "CANCELED" 40 | """Cancel request from the user is honored.""" 41 | 42 | IN_PROGRESS = "IN_PROGRESS" 43 | """Messages which are in progress""" 44 | 45 | 46 | @dataclasses.dataclass 47 | class Log: 48 | time: int 49 | """Unix time of the log entry, in milliseconds.""" 50 | 51 | message_id: str 52 | """Message id associated with the log.""" 53 | 54 | state: LogState 55 | """Current state of the message at this point in time.""" 56 | 57 | error: Optional[str] 58 | """An explanation what went wrong.""" 59 | 60 | next_delivery_time: Optional[int] 61 | """Next scheduled Unix time of the message, milliseconds.""" 62 | 63 | url: str 64 | """Destination url.""" 65 | 66 | url_group: Optional[str] 67 | """Name of the url group if this message was sent through a url group.""" 68 | 69 | endpoint: Optional[str] 70 | """Name of the endpoint if this message was sent through a url group.""" 71 | 72 | queue: Optional[str] 73 | """Name of the queue if this message is enqueued on a queue.""" 74 | 75 | schedule_id: Optional[str] 76 | """Schedule id of the message if the message is triggered by a schedule.""" 77 | 78 | body_base64: Optional[str] 79 | """Base64 encoded body of the message.""" 80 | 81 | headers: Optional[Dict[str, List[str]]] 82 | """Headers of the message""" 83 | 84 | callback_headers: Optional[Dict[str, List[str]]] 85 | """Headers of the callback message""" 86 | 87 | failure_callback_headers: Optional[Dict[str, List[str]]] 88 | """Headers of the failure callback message""" 89 | 90 | response_status: Optional[int] 91 | 
"""HTTP status code of the last failed delivery attempt.""" 92 | 93 | response_headers: Optional[Dict[str, List[str]]] 94 | """Response headers of the last failed delivery attempt.""" 95 | 96 | response_body: Optional[str] 97 | """Response body of the last failed delivery attempt.""" 98 | 99 | timeout: Optional[int] 100 | """HTTP timeout value used while calling the destination url.""" 101 | 102 | method: Optional[HttpMethod] 103 | """HTTP method to use to deliver the message.""" 104 | 105 | callback: Optional[str] 106 | """Url which is called each time the message is attempted to be delivered.""" 107 | 108 | failure_callback: Optional[str] 109 | """Url which is called after the message is failed.""" 110 | 111 | max_retries: Optional[int] 112 | """Number of retries that should be attempted in case of delivery failure.""" 113 | 114 | retry_delay_expression: Optional[str] 115 | """ 116 | The retry delay expression for this DLQ message, 117 | if retry_delay was set when publishing the message. 
118 | """ 119 | 120 | flow_control: Optional[FlowControlProperties] 121 | """Flow control properties""" 122 | 123 | label: Optional[str] 124 | """Label assigned to the request for filtering logs.""" 125 | 126 | 127 | class LogFilter(TypedDict, total=False): 128 | message_id: str 129 | """Filter logs by message id.""" 130 | 131 | message_ids: List[str] 132 | """Filter logs by message ids.""" 133 | 134 | state: LogState 135 | """Filter logs by state.""" 136 | 137 | url: str 138 | """Filter logs by url.""" 139 | 140 | url_group: str 141 | """Filter logs by url group name.""" 142 | 143 | queue: str 144 | """Filter logs by queue name.""" 145 | 146 | schedule_id: str 147 | """Filter logs by schedule id.""" 148 | 149 | from_time: int 150 | """Filter logs by starting Unix time, in milliseconds""" 151 | 152 | to_time: int 153 | """Filter logs by ending Unix time, in milliseconds""" 154 | 155 | label: str 156 | """Filter logs by label.""" 157 | 158 | 159 | @dataclasses.dataclass 160 | class ListLogsResponse: 161 | cursor: Optional[str] 162 | """ 163 | A cursor which can be used in subsequent requests to paginate through 164 | all logs. If `None`, end of the logs are reached. 
165 | """ 166 | 167 | logs: List[Log] 168 | """List of logs.""" 169 | 170 | 171 | def prepare_list_logs_request_params( 172 | *, 173 | cursor: Optional[str], 174 | count: Optional[int], 175 | filter: Optional[LogFilter], 176 | ) -> Dict[str, str]: 177 | params: Dict[str, Any] = {} 178 | 179 | if cursor is not None: 180 | params["cursor"] = cursor 181 | 182 | if count is not None: 183 | params["count"] = str(count) 184 | 185 | if filter is not None: 186 | if "message_id" in filter: 187 | params["messageId"] = filter["message_id"] 188 | 189 | if "message_ids" in filter: 190 | params["messageIds"] = filter["message_ids"] 191 | 192 | if "state" in filter: 193 | params["state"] = filter["state"].value 194 | 195 | if "url" in filter: 196 | params["url"] = filter["url"] 197 | 198 | if "url_group" in filter: 199 | params["topicName"] = filter["url_group"] 200 | 201 | if "queue" in filter: 202 | params["queueName"] = filter["queue"] 203 | 204 | if "schedule_id" in filter: 205 | params["scheduleId"] = filter["schedule_id"] 206 | 207 | if "from_time" in filter: 208 | params["fromDate"] = str(filter["from_time"]) 209 | 210 | if "to_time" in filter: 211 | params["toDate"] = str(filter["to_time"]) 212 | 213 | if "label" in filter: 214 | params["label"] = filter["label"] 215 | 216 | return params 217 | 218 | 219 | def parse_logs_response(response: List[Dict[str, Any]]) -> List[Log]: 220 | logs = [] 221 | 222 | for event in response: 223 | flow_control = parse_flow_control(event) 224 | logs.append( 225 | Log( 226 | time=event["time"], 227 | message_id=event["messageId"], 228 | state=LogState(event["state"]), 229 | error=event.get("error"), 230 | next_delivery_time=event.get("nextDeliveryTime"), 231 | url=event["url"], 232 | url_group=event.get("topicName"), 233 | endpoint=event.get("endpointName"), 234 | queue=event.get("queueName"), 235 | schedule_id=event.get("scheduleId"), 236 | headers=event.get("header"), 237 | callback_headers=event.get("callbackHeaders"), 238 | 
failure_callback_headers=event.get("failureCallbackHeaders"), 239 | body_base64=event.get("body"), 240 | response_status=event.get("responseStatus"), 241 | response_headers=event.get("responseHeader"), 242 | response_body=event.get("responseBody"), 243 | timeout=event.get("timeout"), 244 | callback=event.get("callback"), 245 | failure_callback=event.get("failureCallback"), 246 | flow_control=flow_control, 247 | method=event.get("method"), 248 | max_retries=event.get("maxRetries"), 249 | retry_delay_expression=event.get("retryDelayExpression"), 250 | label=event.get("label"), 251 | ) 252 | ) 253 | 254 | return logs 255 | 256 | 257 | class LogApi: 258 | def __init__(self, http: HttpClient) -> None: 259 | self._http = http 260 | 261 | def list( 262 | self, 263 | *, 264 | cursor: Optional[str] = None, 265 | count: Optional[int] = None, 266 | filter: Optional[LogFilter] = None, 267 | ) -> ListLogsResponse: 268 | """ 269 | Lists all logs that happened, such as message creation or delivery. 270 | 271 | :param cursor: Optional cursor to start listing logs from. 272 | :param count: The maximum number of logs to return. 273 | Default and max is `1000`. 274 | :param filter: Filter to use. 
275 | """ 276 | params = prepare_list_logs_request_params( 277 | cursor=cursor, 278 | count=count, 279 | filter=filter, 280 | ) 281 | 282 | response = self._http.request( 283 | path="/v2/events", 284 | method="GET", 285 | params=params, 286 | ) 287 | 288 | logs = parse_logs_response(response["events"]) 289 | 290 | return ListLogsResponse( 291 | cursor=response.get("cursor"), 292 | logs=logs, 293 | ) 294 | -------------------------------------------------------------------------------- /qstash/asyncio/schedule.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any, Dict, List, Optional, Union 3 | 4 | from qstash.asyncio.http import AsyncHttpClient 5 | from qstash.http import HttpMethod 6 | from qstash.message import FlowControl 7 | from qstash.schedule import ( 8 | Schedule, 9 | parse_schedule_response, 10 | prepare_schedule_headers, 11 | ) 12 | 13 | 14 | class AsyncScheduleApi: 15 | def __init__(self, http: AsyncHttpClient) -> None: 16 | self._http = http 17 | 18 | async def create( 19 | self, 20 | *, 21 | destination: str, 22 | cron: str, 23 | body: Optional[Union[str, bytes]] = None, 24 | content_type: Optional[str] = None, 25 | method: Optional[HttpMethod] = None, 26 | headers: Optional[Dict[str, str]] = None, 27 | callback_headers: Optional[Dict[str, str]] = None, 28 | failure_callback_headers: Optional[Dict[str, str]] = None, 29 | retries: Optional[int] = None, 30 | retry_delay: Optional[str] = None, 31 | callback: Optional[str] = None, 32 | failure_callback: Optional[str] = None, 33 | delay: Optional[Union[str, int]] = None, 34 | timeout: Optional[Union[str, int]] = None, 35 | schedule_id: Optional[str] = None, 36 | queue: Optional[str] = None, 37 | flow_control: Optional[FlowControl] = None, 38 | label: Optional[str] = None, 39 | ) -> str: 40 | """ 41 | Creates a schedule to send messages periodically. 42 | 43 | Returns the created schedule id. 
44 | 45 | :param destination: The destination url or url group. 46 | :param cron: The cron expression to use to schedule the messages. 47 | :param body: The raw request message body passed to the destination as is. 48 | :param content_type: MIME type of the message. 49 | :param method: The HTTP method to use when sending a webhook to your API. 50 | :param headers: Headers to forward along with the message. 51 | :param callback_headers: Headers to forward along with the callback message. 52 | :param failure_callback_headers: Headers to forward along with the failure 53 | callback message. 54 | :param retries: How often should this message be retried in case the destination 55 | API is not available. 56 | :param retry_delay: Delay between retries. 57 | 58 | By default, the `retryDelay` is exponential backoff. 59 | More details can be found in: https://upstash.com/docs/qstash/features/retry. 60 | 61 | The `retryDelay` option allows you to customize the delay (in milliseconds) between retry attempts when message delivery fails. 62 | 63 | You can use mathematical expressions and the following built-in functions to calculate the delay dynamically. 64 | The special variable `retried` represents the current retry attempt count (starting from 0). 65 | 66 | Supported functions: 67 | - `pow` 68 | - `sqrt` 69 | - `abs` 70 | - `exp` 71 | - `floor` 72 | - `ceil` 73 | - `round` 74 | - `min` 75 | - `max` 76 | 77 | Examples of valid `retryDelay` values: 78 | ```py 79 | 1000 # 1 second 80 | 1000 * (1 + retried) # 1 second multiplied by the current retry attempt 81 | pow(2, retried) # 2 to the power of the current retry attempt 82 | max(10, pow(2, retried)) # The greater of 10 or 2^retried 83 | ``` 84 | :param callback: A callback url that will be called after each attempt. 85 | :param failure_callback: A failure callback url that will be called when a delivery 86 | is failed, that is when all the defined retries are exhausted. 87 | :param delay: Delay the message delivery. 
The format for the delay string is a 88 | number followed by duration abbreviation, like `10s`. Available durations 89 | are `s` (seconds), `m` (minutes), `h` (hours), and `d` (days). As convenience, 90 | it is also possible to specify the delay as an integer, which will be 91 | interpreted as delay in seconds. 92 | :param timeout: The HTTP timeout value to use while calling the destination URL. 93 | When a timeout is specified, it will be used instead of the maximum timeout 94 | value permitted by the QStash plan. It is useful in scenarios, where a message 95 | should be delivered with a shorter timeout. 96 | :param schedule_id: Schedule id to use. This can be used to update the settings 97 | of an existing schedule. 98 | :param queue: Name of the queue which the scheduled messages will be enqueued. 99 | :param flow_control: Settings for controlling the number of active requests, 100 | as well as the rate of requests with the same flow control key. 101 | :param label: Assign a label to the request to filter logs with it later. 
102 | """ 103 | req_headers = prepare_schedule_headers( 104 | cron=cron, 105 | content_type=content_type, 106 | method=method, 107 | headers=headers, 108 | callback_headers=callback_headers, 109 | failure_callback_headers=failure_callback_headers, 110 | retries=retries, 111 | retry_delay=retry_delay, 112 | callback=callback, 113 | failure_callback=failure_callback, 114 | delay=delay, 115 | timeout=timeout, 116 | schedule_id=schedule_id, 117 | queue=queue, 118 | flow_control=flow_control, 119 | label=label, 120 | ) 121 | 122 | response = await self._http.request( 123 | path=f"/v2/schedules/{destination}", 124 | method="POST", 125 | headers=req_headers, 126 | body=body, 127 | ) 128 | 129 | return response["scheduleId"] # type:ignore[no-any-return] 130 | 131 | async def create_json( 132 | self, 133 | *, 134 | destination: str, 135 | cron: str, 136 | body: Optional[Any] = None, 137 | method: Optional[HttpMethod] = None, 138 | headers: Optional[Dict[str, str]] = None, 139 | callback_headers: Optional[Dict[str, str]] = None, 140 | failure_callback_headers: Optional[Dict[str, str]] = None, 141 | retries: Optional[int] = None, 142 | retry_delay: Optional[str] = None, 143 | callback: Optional[str] = None, 144 | failure_callback: Optional[str] = None, 145 | delay: Optional[Union[str, int]] = None, 146 | timeout: Optional[Union[str, int]] = None, 147 | schedule_id: Optional[str] = None, 148 | queue: Optional[str] = None, 149 | flow_control: Optional[FlowControl] = None, 150 | label: Optional[str] = None, 151 | ) -> str: 152 | """ 153 | Creates a schedule to send messages periodically, automatically serializing the 154 | body as JSON string, and setting content type to `application/json`. 155 | 156 | Returns the created schedule id. 157 | 158 | :param destination: The destination url or url group. 159 | :param cron: The cron expression to use to schedule the messages. 
160 | :param body: The request message body passed to the destination after being 161 | serialized as JSON string. 162 | :param method: The HTTP method to use when sending a webhook to your API. 163 | :param headers: Headers to forward along with the message. 164 | :param callback_headers: Headers to forward along with the callback message. 165 | :param failure_callback_headers: Headers to forward along with the failure 166 | callback message. 167 | :param retries: How often should this message be retried in case the destination 168 | API is not available. 169 | :param retry_delay: Delay between retries. 170 | 171 | By default, the `retryDelay` is exponential backoff. 172 | More details can be found in: https://upstash.com/docs/qstash/features/retry. 173 | 174 | The `retryDelay` option allows you to customize the delay (in milliseconds) between retry attempts when message delivery fails. 175 | 176 | You can use mathematical expressions and the following built-in functions to calculate the delay dynamically. 177 | The special variable `retried` represents the current retry attempt count (starting from 0). 178 | 179 | Supported functions: 180 | - `pow` 181 | - `sqrt` 182 | - `abs` 183 | - `exp` 184 | - `floor` 185 | - `ceil` 186 | - `round` 187 | - `min` 188 | - `max` 189 | 190 | Examples of valid `retryDelay` values: 191 | ```py 192 | 1000 # 1 second 193 | 1000 * (1 + retried) # 1 second multiplied by the current retry attempt 194 | pow(2, retried) # 2 to the power of the current retry attempt 195 | max(10, pow(2, retried)) # The greater of 10 or 2^retried 196 | ``` 197 | :param callback: A callback url that will be called after each attempt. 198 | :param failure_callback: A failure callback url that will be called when a delivery 199 | is failed, that is when all the defined retries are exhausted. 200 | :param delay: Delay the message delivery. The format for the delay string is a 201 | number followed by duration abbreviation, like `10s`. 
Available durations 202 | are `s` (seconds), `m` (minutes), `h` (hours), and `d` (days). As convenience, 203 | it is also possible to specify the delay as an integer, which will be 204 | interpreted as delay in seconds. 205 | :param timeout: The HTTP timeout value to use while calling the destination URL. 206 | When a timeout is specified, it will be used instead of the maximum timeout 207 | value permitted by the QStash plan. It is useful in scenarios, where a message 208 | should be delivered with a shorter timeout. 209 | :param schedule_id: Schedule id to use. This can be used to update the settings 210 | of an existing schedule. 211 | :param queue: Name of the queue which the scheduled messages will be enqueued. 212 | :param flow_control: Settings for controlling the number of active requests, 213 | as well as the rate of requests with the same flow control key. 214 | :param label: Assign a label to the request to filter logs with it later. 215 | """ 216 | return await self.create( 217 | destination=destination, 218 | cron=cron, 219 | body=json.dumps(body), 220 | content_type="application/json", 221 | method=method, 222 | headers=headers, 223 | callback_headers=callback_headers, 224 | failure_callback_headers=failure_callback_headers, 225 | retries=retries, 226 | retry_delay=retry_delay, 227 | callback=callback, 228 | failure_callback=failure_callback, 229 | delay=delay, 230 | timeout=timeout, 231 | schedule_id=schedule_id, 232 | queue=queue, 233 | flow_control=flow_control, 234 | label=label, 235 | ) 236 | 237 | async def get(self, schedule_id: str) -> Schedule: 238 | """ 239 | Gets the schedule by its id. 240 | """ 241 | response = await self._http.request( 242 | path=f"/v2/schedules/{schedule_id}", 243 | method="GET", 244 | ) 245 | 246 | return parse_schedule_response(response) 247 | 248 | async def list(self) -> List[Schedule]: 249 | """ 250 | Lists all the schedules. 
251 | """ 252 | response = await self._http.request( 253 | path="/v2/schedules", 254 | method="GET", 255 | ) 256 | 257 | return [parse_schedule_response(r) for r in response] 258 | 259 | async def delete(self, schedule_id: str) -> None: 260 | """ 261 | Deletes the schedule. 262 | """ 263 | await self._http.request( 264 | path=f"/v2/schedules/{schedule_id}", 265 | method="DELETE", 266 | parse_response=False, 267 | ) 268 | 269 | async def pause(self, schedule_id: str) -> None: 270 | """ 271 | Pauses the schedule. 272 | 273 | A paused schedule will not produce new messages until 274 | it is resumed. 275 | """ 276 | await self._http.request( 277 | path=f"/v2/schedules/{schedule_id}/pause", 278 | method="PATCH", 279 | parse_response=False, 280 | ) 281 | 282 | async def resume(self, schedule_id: str) -> None: 283 | """ 284 | Resumes the schedule. 285 | """ 286 | await self._http.request( 287 | path=f"/v2/schedules/{schedule_id}/resume", 288 | method="PATCH", 289 | parse_response=False, 290 | ) 291 | -------------------------------------------------------------------------------- /tests/test_message.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import pytest 4 | 5 | from qstash import QStash 6 | from qstash.chat import openai 7 | from qstash.errors import QStashError 8 | from qstash.log import LogState 9 | from qstash.message import ( 10 | BatchJsonRequest, 11 | BatchRequest, 12 | BatchResponse, 13 | EnqueueResponse, 14 | PublishResponse, 15 | FlowControl, 16 | ) 17 | from tests import assert_eventually, OPENAI_API_KEY 18 | 19 | 20 | def assert_delivered_eventually(client: QStash, msg_id: str) -> None: 21 | def assertion() -> None: 22 | logs = client.log.list( 23 | filter={ 24 | "message_id": msg_id, 25 | "state": LogState.DELIVERED, 26 | } 27 | ).logs 28 | 29 | assert len(logs) == 1 30 | 31 | assert_eventually( 32 | assertion, 33 | initial_delay=1.0, 34 | retry_delay=1.0, 35 | timeout=60.0, 36 
| ) 37 | 38 | 39 | def test_publish_to_url(client: QStash) -> None: 40 | res = client.message.publish( 41 | body="test-body", 42 | method="GET", 43 | url="https://mock.httpstatus.io/200", 44 | headers={ 45 | "test-header": "test-value", 46 | }, 47 | ) 48 | 49 | assert isinstance(res, PublishResponse) 50 | assert len(res.message_id) > 0 51 | 52 | assert_delivered_eventually(client, res.message_id) 53 | 54 | 55 | def test_message_log_retry_delay_expression(client: QStash) -> None: 56 | res = client.message.publish( 57 | body="test-body", 58 | method="GET", 59 | url="https://mock.httpstatus.io/200", 60 | headers={"test-header": "test-value"}, 61 | retry_delay="15000", 62 | ) 63 | assert isinstance(res, PublishResponse) 64 | assert len(res.message_id) > 0 65 | # Wait for delivery and log 66 | assert_delivered_eventually(client, res.message_id) 67 | logs = client.log.list(filter={"message_id": res.message_id}).logs 68 | assert logs[0].retry_delay_expression == "15000" 69 | 70 | 71 | def test_publish_to_url_json(client: QStash) -> None: 72 | res = client.message.publish_json( 73 | body={"ex_key": "ex_value"}, 74 | method="GET", 75 | url="https://mock.httpstatus.io/200", 76 | headers={ 77 | "test-header": "test-value", 78 | }, 79 | ) 80 | 81 | assert isinstance(res, PublishResponse) 82 | assert len(res.message_id) > 0 83 | 84 | assert_delivered_eventually(client, res.message_id) 85 | 86 | 87 | def test_disallow_multiple_destinations(client: QStash) -> None: 88 | with pytest.raises(QStashError): 89 | client.message.publish_json( 90 | method="GET", 91 | url="https://mock.httpstatus.io/200", 92 | url_group="test-url-group", 93 | ) 94 | 95 | with pytest.raises(QStashError): 96 | client.message.publish_json( 97 | method="GET", 98 | url="https://mock.httpstatus.io/200", 99 | api={"name": "llm", "provider": openai(OPENAI_API_KEY)}, 100 | ) 101 | 102 | with pytest.raises(QStashError): 103 | client.message.publish_json( 104 | url_group="test-url-group", 105 | api={"name": "llm", 
"provider": openai(OPENAI_API_KEY)}, 106 | ) 107 | 108 | 109 | def test_batch(client: QStash) -> None: 110 | N = 3 111 | messages = [] 112 | for i in range(N): 113 | messages.append( 114 | BatchRequest( 115 | body=f"hi {i}", 116 | method="GET", 117 | url="https://mock.httpstatus.io/200", 118 | retries=0, 119 | headers={ 120 | f"test-header-{i}": f"test-value-{i}", 121 | "content-type": "text/plain", 122 | }, 123 | ) 124 | ) 125 | 126 | res = client.message.batch(messages) 127 | 128 | assert len(res) == N 129 | 130 | for r in res: 131 | assert isinstance(r, BatchResponse) 132 | assert len(r.message_id) > 0 133 | 134 | 135 | def test_batch_json(client: QStash) -> None: 136 | N = 3 137 | messages = [] 138 | for i in range(N): 139 | messages.append( 140 | BatchJsonRequest( 141 | body={"hi": i}, 142 | method="GET", 143 | url="https://mock.httpstatus.io/200", 144 | retries=0, 145 | headers={ 146 | f"test-header-{i}": f"test-value-{i}", 147 | }, 148 | ) 149 | ) 150 | 151 | res = client.message.batch_json(messages) 152 | 153 | assert len(res) == N 154 | 155 | for r in res: 156 | assert isinstance(r, BatchResponse) 157 | assert len(r.message_id) > 0 158 | 159 | 160 | def test_publish_to_api_llm(client: QStash) -> None: 161 | res = client.message.publish_json( 162 | api={"name": "llm", "provider": openai(OPENAI_API_KEY)}, 163 | body={ 164 | "model": "gpt-3.5-turbo", 165 | "messages": [ 166 | { 167 | "role": "user", 168 | "content": "just say hello", 169 | } 170 | ], 171 | }, 172 | callback="https://mock.httpstatus.io/200", 173 | ) 174 | 175 | assert isinstance(res, PublishResponse) 176 | assert len(res.message_id) > 0 177 | 178 | assert_delivered_eventually(client, res.message_id) 179 | 180 | 181 | def test_batch_api_llm(client: QStash) -> None: 182 | res = client.message.batch_json( 183 | [ 184 | { 185 | "api": {"name": "llm", "provider": openai(OPENAI_API_KEY)}, 186 | "body": { 187 | "model": "gpt-3.5-turbo", 188 | "messages": [ 189 | { 190 | "role": "user", 191 | 
"content": "just say hello", 192 | } 193 | ], 194 | }, 195 | "callback": "https://mock.httpstatus.io/200", 196 | }, 197 | ] 198 | ) 199 | 200 | assert len(res) == 1 201 | 202 | assert isinstance(res[0], BatchResponse) 203 | assert len(res[0].message_id) > 0 204 | 205 | assert_delivered_eventually(client, res[0].message_id) 206 | 207 | 208 | def test_enqueue( 209 | client: QStash, 210 | cleanup_queue: Callable[[QStash, str], None], 211 | ) -> None: 212 | name = "test_queue" 213 | cleanup_queue(client, name) 214 | 215 | res = client.message.enqueue( 216 | queue=name, 217 | body="test-body", 218 | method="GET", 219 | url="https://mock.httpstatus.io/200", 220 | headers={ 221 | "test-header": "test-value", 222 | }, 223 | ) 224 | 225 | assert isinstance(res, EnqueueResponse) 226 | 227 | assert len(res.message_id) > 0 228 | 229 | 230 | def test_enqueue_json( 231 | client: QStash, 232 | cleanup_queue: Callable[[QStash, str], None], 233 | ) -> None: 234 | name = "test_queue" 235 | cleanup_queue(client, name) 236 | 237 | res = client.message.enqueue_json( 238 | queue=name, 239 | body={"test": "body"}, 240 | method="GET", 241 | url="https://mock.httpstatus.io/200", 242 | headers={ 243 | "test-header": "test-value", 244 | }, 245 | ) 246 | 247 | assert isinstance(res, EnqueueResponse) 248 | 249 | assert len(res.message_id) > 0 250 | 251 | 252 | def test_enqueue_api_llm( 253 | client: QStash, 254 | cleanup_queue: Callable[[QStash, str], None], 255 | ) -> None: 256 | name = "test_queue" 257 | cleanup_queue(client, name) 258 | 259 | res = client.message.enqueue_json( 260 | queue=name, 261 | body={ 262 | "model": "gpt-3.5-turbo", 263 | "messages": [ 264 | { 265 | "role": "user", 266 | "content": "just say hello", 267 | } 268 | ], 269 | }, 270 | api={"name": "llm", "provider": openai(OPENAI_API_KEY)}, 271 | callback="https://mock.httpstatus.io/200", 272 | ) 273 | 274 | assert isinstance(res, EnqueueResponse) 275 | 276 | assert len(res.message_id) > 0 277 | 278 | 279 | def 
test_publish_to_url_group(client: QStash) -> None: 280 | name = "python_url_group" 281 | client.url_group.delete(name) 282 | 283 | client.url_group.upsert_endpoints( 284 | url_group=name, 285 | endpoints=[ 286 | {"url": "https://mock.httpstatus.io/200"}, 287 | {"url": "https://mock.httpstatus.io/201"}, 288 | ], 289 | ) 290 | 291 | res = client.message.publish( 292 | method="GET", 293 | body="test-body", 294 | url_group=name, 295 | ) 296 | 297 | assert isinstance(res, list) 298 | assert len(res) == 2 299 | 300 | assert_delivered_eventually(client, res[0].message_id) 301 | assert_delivered_eventually(client, res[1].message_id) 302 | 303 | 304 | def test_timeout(client: QStash) -> None: 305 | res = client.message.publish_json( 306 | body={"ex_key": "ex_value"}, 307 | method="GET", 308 | url="https://mock.httpstatus.io/200", 309 | timeout=90, 310 | ) 311 | 312 | assert isinstance(res, PublishResponse) 313 | assert len(res.message_id) > 0 314 | 315 | assert_delivered_eventually(client, res.message_id) 316 | 317 | 318 | def test_cancel_many(client: QStash) -> None: 319 | res0 = client.message.publish( 320 | url="http://httpstat.us/404", 321 | retries=3, 322 | ) 323 | 324 | assert isinstance(res0, PublishResponse) 325 | 326 | res1 = client.message.publish( 327 | url="http://httpstat.us/404", 328 | retries=3, 329 | ) 330 | 331 | assert isinstance(res1, PublishResponse) 332 | 333 | cancelled = client.message.cancel_many([res0.message_id, res1.message_id]) 334 | 335 | assert cancelled == 2 336 | 337 | 338 | def test_cancel_all(client: QStash) -> None: 339 | res0 = client.message.publish( 340 | url="http://httpstat.us/404", 341 | retries=3, 342 | ) 343 | 344 | assert isinstance(res0, PublishResponse) 345 | 346 | res1 = client.message.publish( 347 | url="http://httpstat.us/404", 348 | retries=3, 349 | ) 350 | 351 | assert isinstance(res1, PublishResponse) 352 | 353 | cancelled = client.message.cancel_all() 354 | 355 | assert cancelled >= 2 356 | 357 | 358 | def 
def test_publish_to_api_llm_custom_provider(client: QStash) -> None:
    result = client.message.publish_json(
        api={"name": "llm", "provider": openai(OPENAI_API_KEY)},
        body={
            "model": "gpt-3.5-turbo",
            "messages": [
                {
                    "role": "user",
                    "content": "just say hello",
                }
            ],
        },
        callback="https://mock.httpstatus.io/200",
    )

    assert isinstance(result, PublishResponse)
    assert len(result.message_id) > 0

    assert_delivered_eventually(client, result.message_id)


def test_enqueue_api_llm_custom_provider(
    client: QStash,
    cleanup_queue: Callable[[QStash, str], None],
) -> None:
    queue_name = "test_queue"
    cleanup_queue(client, queue_name)

    result = client.message.enqueue_json(
        queue=queue_name,
        body={
            "model": "gpt-3.5-turbo",
            "messages": [
                {
                    "role": "user",
                    "content": "just say hello",
                }
            ],
        },
        api={"name": "llm", "provider": openai(OPENAI_API_KEY)},
        callback="https://mock.httpstatus.io/200",
    )

    assert isinstance(result, EnqueueResponse)
    assert len(result.message_id) > 0


def test_publish_with_flow_control(
    client: QStash,
) -> None:
    published = client.message.publish_json(
        body={"ex_key": "ex_value"},
        url="https://mock.httpstatus.io/200?sleep=30000",
        flow_control=FlowControl(key="flow-key", parallelism=3, rate=4, period=2),
    )

    assert isinstance(published, PublishResponse)
    message = client.message.get(published.message_id)

    flow_control = message.flow_control
    assert flow_control is not None
    assert flow_control.key == "flow-key"
    assert flow_control.parallelism == 3
    assert flow_control.rate == 4
    assert flow_control.period == 2


def test_batch_with_flow_control(client: QStash) -> None:
    result = client.message.batch_json(
        [
            {
                "body": {"ex_key": "ex_value"},
                "url": "https://mock.httpstatus.io/200?sleep=30000",
                "flow_control": FlowControl(key="flow-key-1", rate=1),
            },
            BatchJsonRequest(
                body={"ex_key": "ex_value"},
                url="https://mock.httpstatus.io/200?sleep=30000",
                flow_control=FlowControl(key="flow-key-2", rate=23, period="1h30m3s"),
            ),
            {
                "body": {"ex_key": "ex_value"},
                "url": "https://mock.httpstatus.io/200?sleep=30000",
                "flow_control": FlowControl(key="flow-key-3", parallelism=5),
            },
        ]
    )

    expected = [
        # (key, parallelism, rate, period)
        ("flow-key-1", None, 1, 1),
        ("flow-key-2", None, 23, 5403),  # "1h30m3s" == 5403 seconds
        ("flow-key-3", 5, None, 1),
    ]

    for batch_response, (key, parallelism, rate, period) in zip(result, expected):
        assert isinstance(batch_response, BatchResponse)
        message = client.message.get(batch_response.message_id)

        flow_control = message.flow_control
        assert flow_control is not None
        assert flow_control.key == key
        assert flow_control.parallelism == parallelism
        assert flow_control.rate == rate
        assert flow_control.period == period


def test_publish_with_label(client: QStash) -> None:
    result = client.message.publish(
        body="test-body-with-label",
        url="https://mock.httpstatus.io/200",
        label="test-publish-label",
    )

    assert isinstance(result, PublishResponse)
    assert len(result.message_id) > 0

    # Verify the stored message carries the label
    assert client.message.get(result.message_id).label == "test-publish-label"


def test_publish_json_with_label(client: QStash) -> None:
    result = client.message.publish_json(
        body={"test": "data", "label": "json-test"},
        url="https://mock.httpstatus.io/200",
        label="test-json-label",
    )

    assert isinstance(result, PublishResponse)
    assert len(result.message_id) > 0

    # Verify the stored message carries the label
    assert client.message.get(result.message_id).label == "test-json-label"


def test_enqueue_with_label(
    client: QStash,
    cleanup_queue: Callable[[QStash, str], None],
) -> None:
    queue_name = "test_queue_with_label"
    cleanup_queue(client, queue_name)

    result = client.message.enqueue(
        queue=queue_name,
        body="test-enqueue-body",
        url="https://mock.httpstatus.io/200",
        label="test-enqueue-label",
    )

    assert isinstance(result, EnqueueResponse)
    assert len(result.message_id) > 0

    # Verify the stored message carries the label
    assert client.message.get(result.message_id).label == "test-enqueue-label"


def test_enqueue_json_with_label(
    client: QStash,
    cleanup_queue: Callable[[QStash, str], None],
) -> None:
    queue_name = "test_queue_json_label"
    cleanup_queue(client, queue_name)

    result = client.message.enqueue_json(
        queue=queue_name,
        body={"enqueue": "json-data"},
        url="https://mock.httpstatus.io/200",
        label="test-enqueue-json-label",
    )

    assert isinstance(result, EnqueueResponse)
    assert len(result.message_id) > 0

    # Verify the stored message carries the label
    assert client.message.get(result.message_id).label == "test-enqueue-json-label"


def test_batch_with_label(client: QStash) -> None:
    result = client.message.batch(
        [
            BatchRequest(
                body="batch-test-1",
                url="https://mock.httpstatus.io/200",
                label="batch-label-1",
            ),
            BatchRequest(
                body="batch-test-2",
                url="https://mock.httpstatus.io/201",
                label="batch-label-2",
            ),
        ]
    )

    assert len(result) == 2

    for batch_response, expected_label in zip(
        result, ["batch-label-1", "batch-label-2"]
    ):
        assert isinstance(batch_response, BatchResponse)
        assert client.message.get(batch_response.message_id).label == expected_label


def test_batch_json_with_label(client: QStash) -> None:
    result = client.message.batch_json(
        [
            BatchJsonRequest(
                body={"batch": "json-1"},
                url="https://mock.httpstatus.io/200",
                label="batch-json-label-1",
            ),
            BatchJsonRequest(
                body={"batch": "json-2"},
                url="https://mock.httpstatus.io/201",
                label="batch-json-label-2",
            ),
        ]
    )

    assert len(result) == 2

    for batch_response, expected_label in zip(
        result, ["batch-json-label-1", "batch-json-label-2"]
    ):
        assert isinstance(batch_response, BatchResponse)
        assert client.message.get(batch_response.message_id).label == expected_label


def test_log_filtering_by_label(client: QStash) -> None:
    # Publish a message with a specific label
    res = client.message.publish(
        body="test-log-filtering",
        url="https://mock.httpstatus.io/200",
        label="log-filter-test",
    )

    assert isinstance(res, PublishResponse)

    # Wait for message delivery and then check logs with label filter
    def assertion() -> None:
        logs = client.log.list(
            filter={
                "label": "log-filter-test",
            }
        ).logs

        # Should find at least one log entry with our label
        assert len(logs) > 0
        # Verify that all returned logs have the expected label
        for log in logs:
            assert log.label == "log-filter-test"

    assert_eventually(
        assertion,
        initial_delay=1.0,
        retry_delay=1.0,
        timeout=30.0,
    )
--------------------------------------------------------------------------------
/qstash/schedule.py:
--------------------------------------------------------------------------------
import dataclasses
import enum
import json
from typing import Any, Dict, List, Optional, Union

from qstash.http import HttpClient, HttpMethod
from qstash.message import FlowControl, parse_flow_control, FlowControlProperties


class ScheduleState(enum.Enum):
    # State of a message produced in the last schedule run
    # (see Schedule.last_schedule_states).
    IN_PROGRESS = "IN_PROGRESS"
    SUCCESS = "SUCCESS"
    FAIL = "FAIL"


@dataclasses.dataclass
class Schedule:
    schedule_id: str
    """Id of the schedule."""

    destination: str
    """Destination url or url group."""

    cron: str
    """Cron expression used to schedule the messages."""

    created_at: int
    """Unix time in milliseconds when the schedule was created."""

    body: Optional[str]
    """Body of the scheduled message if it is composed of UTF-8 characters only."""

    body_base64: Optional[str]
    """Base64 encoded body if the scheduled message body contains non-UTF-8 characters."""

    method: HttpMethod
    """HTTP method to use to deliver the message."""

    headers: Optional[Dict[str, List[str]]]
    """Headers that will be forwarded to destination."""

    callback_headers: Optional[Dict[str, List[str]]]
    """Headers that will be forwarded to callback url."""

    failure_callback_headers: Optional[Dict[str, List[str]]]
    """Headers that will be forwarded to failure callback url."""

    retries: int
    """Number of retries that should be attempted in case of delivery failure."""

    retry_delay_expression: Optional[str]
    """
    The retry delay expression for this schedule,
    if retry_delay was set when creating the schedule.
    """

    callback: Optional[str]
    """Url which is called each time the message is attempted to be delivered."""

    failure_callback: Optional[str]
    """Url which is called after the message is failed."""

    queue: Optional[str]
    """
    Name of the queue which the messages will be enqueued,
    if the destination is a queue.
    """

    delay: Optional[int]
    """Delay in seconds before the message is delivered."""

    timeout: Optional[int]
    """HTTP timeout value to use while calling the destination url."""

    caller_ip: Optional[str]
    """IP address of the creator of this schedule."""

    paused: bool
    """Whether the schedule is paused or not."""

    flow_control: Optional[FlowControlProperties]
    """Flow control properties"""

    last_schedule_time: Optional[int]
    """Unix time of the last schedule, in milliseconds."""

    next_schedule_time: Optional[int]
    """Unix time of the next schedule, in milliseconds."""

    last_schedule_states: Optional[Dict[str, ScheduleState]]
    """
    Map of the message ids to schedule states for the
    published/enqueued messages in the last schedule run.
    """

    error: Optional[str]
    """Error message of the last schedule trigger."""

    label: Optional[str]
    """Label assigned to the schedule for filtering logs."""


def prepare_schedule_headers(
    *,
    cron: str,
    content_type: Optional[str],
    method: Optional[HttpMethod],
    headers: Optional[Dict[str, str]],
    callback_headers: Optional[Dict[str, str]],
    failure_callback_headers: Optional[Dict[str, str]],
    retries: Optional[int],
    retry_delay: Optional[str],
    callback: Optional[str],
    failure_callback: Optional[str],
    delay: Optional[Union[str, int]],
    timeout: Optional[Union[str, int]],
    schedule_id: Optional[str],
    queue: Optional[str],
    flow_control: Optional[FlowControl],
    label: Optional[str],
) -> Dict[str, str]:
    """
    Build the `Upstash-*` request headers for schedule creation,
    translating each optional setting into its header form.
    """
    h = {
        "Upstash-Cron": cron,
    }

    if content_type is not None:
        h["Content-Type"] = content_type

    if method is not None:
        h["Upstash-Method"] = method

    if headers:
        # User headers are forwarded to the destination; prefix them
        # unless the caller already used the forward prefix.
        for k, v in headers.items():
            if not k.lower().startswith("upstash-forward-"):
                k = f"Upstash-Forward-{k}"

            h[k] = str(v)

    if callback_headers:
        for k, v in callback_headers.items():
            if not k.lower().startswith("upstash-callback-"):
                k = f"Upstash-Callback-{k}"

            h[k] = str(v)

    if failure_callback_headers:
        for k, v in failure_callback_headers.items():
            if not k.lower().startswith("upstash-failure-callback-"):
                k = f"Upstash-Failure-Callback-{k}"

            h[k] = str(v)

    if retries is not None:
        h["Upstash-Retries"] = str(retries)

    if retry_delay is not None:
        h["Upstash-Retry-Delay"] = retry_delay

    if callback is not None:
        h["Upstash-Callback"] = callback

    if failure_callback is not None:
        h["Upstash-Failure-Callback"] = failure_callback

if delay is not None: 166 | if isinstance(delay, int): 167 | h["Upstash-Delay"] = f"{delay}s" 168 | else: 169 | h["Upstash-Delay"] = delay 170 | 171 | if timeout is not None: 172 | if isinstance(timeout, int): 173 | h["Upstash-Timeout"] = f"{timeout}s" 174 | else: 175 | h["Upstash-Timeout"] = timeout 176 | 177 | if schedule_id is not None: 178 | h["Upstash-Schedule-Id"] = schedule_id 179 | 180 | if flow_control and "key" in flow_control: 181 | control_values = [] 182 | 183 | if "parallelism" in flow_control: 184 | control_values.append(f"parallelism={flow_control['parallelism']}") 185 | 186 | if "rate" in flow_control: 187 | control_values.append(f"rate={flow_control['rate']}") 188 | 189 | if "period" in flow_control: 190 | period = flow_control["period"] 191 | if isinstance(period, int): 192 | period = f"{period}s" 193 | 194 | control_values.append(f"period={period}") 195 | 196 | h["Upstash-Flow-Control-Key"] = flow_control["key"] 197 | h["Upstash-Flow-Control-Value"] = ", ".join(control_values) 198 | 199 | if queue is not None: 200 | h["Upstash-Queue-Name"] = queue 201 | 202 | if label is not None: 203 | h["Upstash-Label"] = label 204 | 205 | return h 206 | 207 | 208 | def parse_schedule_response(response: Dict[str, Any]) -> Schedule: 209 | flow_control = parse_flow_control(response) 210 | 211 | if "lastScheduleStates" in response: 212 | last_states = response["lastScheduleStates"] 213 | for k, v in last_states.items(): 214 | last_states[k] = ScheduleState(v) 215 | else: 216 | last_states = None 217 | 218 | return Schedule( 219 | schedule_id=response["scheduleId"], 220 | destination=response["destination"], 221 | cron=response["cron"], 222 | created_at=response["createdAt"], 223 | body=response.get("body"), 224 | body_base64=response.get("bodyBase64"), 225 | method=response["method"], 226 | headers=response.get("header"), 227 | callback_headers=response.get("callbackHeader"), 228 | failure_callback_headers=response.get("failureCallbackHeader"), 229 | 
retries=response["retries"], 230 | callback=response.get("callback"), 231 | failure_callback=response.get("failureCallback"), 232 | delay=response.get("delay"), 233 | timeout=response.get("timeout"), 234 | caller_ip=response.get("callerIP"), 235 | paused=response.get("isPaused", False), 236 | queue=response.get("queueName"), 237 | flow_control=flow_control, 238 | last_schedule_time=response.get("lastScheduleTime"), 239 | next_schedule_time=response.get("nextScheduleTime"), 240 | last_schedule_states=last_states, 241 | error=response.get("error"), 242 | retry_delay_expression=response.get("retryDelayExpression"), 243 | label=response.get("label"), 244 | ) 245 | 246 | 247 | class ScheduleApi: 248 | def __init__(self, http: HttpClient) -> None: 249 | self._http = http 250 | 251 | def create( 252 | self, 253 | *, 254 | destination: str, 255 | cron: str, 256 | body: Optional[Union[str, bytes]] = None, 257 | content_type: Optional[str] = None, 258 | method: Optional[HttpMethod] = None, 259 | headers: Optional[Dict[str, str]] = None, 260 | callback_headers: Optional[Dict[str, str]] = None, 261 | failure_callback_headers: Optional[Dict[str, str]] = None, 262 | retries: Optional[int] = None, 263 | retry_delay: Optional[str] = None, 264 | callback: Optional[str] = None, 265 | failure_callback: Optional[str] = None, 266 | delay: Optional[Union[str, int]] = None, 267 | timeout: Optional[Union[str, int]] = None, 268 | schedule_id: Optional[str] = None, 269 | queue: Optional[str] = None, 270 | flow_control: Optional[FlowControl] = None, 271 | label: Optional[str] = None, 272 | ) -> str: 273 | """ 274 | Creates a schedule to send messages periodically. 275 | 276 | Returns the created schedule id. 277 | 278 | :param destination: The destination url or url group. 279 | :param cron: The cron expression to use to schedule the messages. 280 | :param body: The raw request message body passed to the destination as is. 281 | :param content_type: MIME type of the message. 
282 | :param method: The HTTP method to use when sending a webhook to your API. 283 | :param headers: Headers to forward along with the message. 284 | :param callback_headers: Headers to forward along with the callback message. 285 | :param failure_callback_headers: Headers to forward along with the failure 286 | callback message. 287 | :param retries: How often should this message be retried in case the destination 288 | API is not available. 289 | :param retry_delay: Delay between retries. 290 | 291 | By default, the `retryDelay` is exponential backoff. 292 | More details can be found in: https://upstash.com/docs/qstash/features/retry. 293 | 294 | The `retryDelay` option allows you to customize the delay (in milliseconds) between retry attempts when message delivery fails. 295 | 296 | You can use mathematical expressions and the following built-in functions to calculate the delay dynamically. 297 | The special variable `retried` represents the current retry attempt count (starting from 0). 298 | 299 | Supported functions: 300 | - `pow` 301 | - `sqrt` 302 | - `abs` 303 | - `exp` 304 | - `floor` 305 | - `ceil` 306 | - `round` 307 | - `min` 308 | - `max` 309 | 310 | Examples of valid `retryDelay` values: 311 | ```py 312 | 1000 # 1 second 313 | 1000 * (1 + retried) # 1 second multiplied by the current retry attempt 314 | pow(2, retried) # 2 to the power of the current retry attempt 315 | max(10, pow(2, retried)) # The greater of 10 or 2^retried 316 | ``` 317 | :param callback: A callback url that will be called after each attempt. 318 | :param failure_callback: A failure callback url that will be called when a delivery 319 | is failed, that is when all the defined retries are exhausted. 320 | :param delay: Delay the message delivery. The format for the delay string is a 321 | number followed by duration abbreviation, like `10s`. Available durations 322 | are `s` (seconds), `m` (minutes), `h` (hours), and `d` (days). 
As convenience, 323 | it is also possible to specify the delay as an integer, which will be 324 | interpreted as delay in seconds. 325 | :param timeout: The HTTP timeout value to use while calling the destination URL. 326 | When a timeout is specified, it will be used instead of the maximum timeout 327 | value permitted by the QStash plan. It is useful in scenarios, where a message 328 | should be delivered with a shorter timeout. 329 | :param schedule_id: Schedule id to use. This can be used to update the settings 330 | of an existing schedule. 331 | :param queue: Name of the queue which the scheduled messages will be enqueued. 332 | :param flow_control: Settings for controlling the number of active requests, 333 | as well as the rate of requests with the same flow control key. 334 | :param label: Assign a label to the request to filter logs with it later. 335 | """ 336 | req_headers = prepare_schedule_headers( 337 | cron=cron, 338 | content_type=content_type, 339 | method=method, 340 | headers=headers, 341 | callback_headers=callback_headers, 342 | failure_callback_headers=failure_callback_headers, 343 | retries=retries, 344 | retry_delay=retry_delay, 345 | callback=callback, 346 | failure_callback=failure_callback, 347 | delay=delay, 348 | timeout=timeout, 349 | schedule_id=schedule_id, 350 | queue=queue, 351 | flow_control=flow_control, 352 | label=label, 353 | ) 354 | 355 | response = self._http.request( 356 | path=f"/v2/schedules/{destination}", 357 | method="POST", 358 | headers=req_headers, 359 | body=body, 360 | ) 361 | 362 | return response["scheduleId"] # type:ignore[no-any-return] 363 | 364 | def create_json( 365 | self, 366 | *, 367 | destination: str, 368 | cron: str, 369 | body: Optional[Any] = None, 370 | method: Optional[HttpMethod] = None, 371 | headers: Optional[Dict[str, str]] = None, 372 | callback_headers: Optional[Dict[str, str]] = None, 373 | failure_callback_headers: Optional[Dict[str, str]] = None, 374 | retries: Optional[int] = None, 375 
| retry_delay: Optional[str] = None, 376 | callback: Optional[str] = None, 377 | failure_callback: Optional[str] = None, 378 | delay: Optional[Union[str, int]] = None, 379 | timeout: Optional[Union[str, int]] = None, 380 | schedule_id: Optional[str] = None, 381 | queue: Optional[str] = None, 382 | flow_control: Optional[FlowControl] = None, 383 | label: Optional[str] = None, 384 | ) -> str: 385 | """ 386 | Creates a schedule to send messages periodically, automatically serializing the 387 | body as JSON string, and setting content type to `application/json`. 388 | 389 | Returns the created schedule id. 390 | 391 | :param destination: The destination url or url group. 392 | :param cron: The cron expression to use to schedule the messages. 393 | :param body: The request message body passed to the destination after being 394 | serialized as JSON string. 395 | :param method: The HTTP method to use when sending a webhook to your API. 396 | :param headers: Headers to forward along with the message. 397 | :param callback_headers: Headers to forward along with the callback message. 398 | :param failure_callback_headers: Headers to forward along with the failure 399 | callback message. 400 | :param retries: How often should this message be retried in case the destination 401 | API is not available. 402 | :param retry_delay: Delay between retries. 403 | 404 | By default, the `retryDelay` is exponential backoff. 405 | More details can be found in: https://upstash.com/docs/qstash/features/retry. 406 | 407 | The `retryDelay` option allows you to customize the delay (in milliseconds) between retry attempts when message delivery fails. 408 | 409 | You can use mathematical expressions and the following built-in functions to calculate the delay dynamically. 410 | The special variable `retried` represents the current retry attempt count (starting from 0). 
411 | 412 | Supported functions: 413 | - `pow` 414 | - `sqrt` 415 | - `abs` 416 | - `exp` 417 | - `floor` 418 | - `ceil` 419 | - `round` 420 | - `min` 421 | - `max` 422 | 423 | Examples of valid `retryDelay` values: 424 | ```py 425 | 1000 # 1 second 426 | 1000 * (1 + retried) # 1 second multiplied by the current retry attempt 427 | pow(2, retried) # 2 to the power of the current retry attempt 428 | max(10, pow(2, retried)) # The greater of 10 or 2^retried 429 | ``` 430 | :param callback: A callback url that will be called after each attempt. 431 | :param failure_callback: A failure callback url that will be called when a delivery 432 | is failed, that is when all the defined retries are exhausted. 433 | :param delay: Delay the message delivery. The format for the delay string is a 434 | number followed by duration abbreviation, like `10s`. Available durations 435 | are `s` (seconds), `m` (minutes), `h` (hours), and `d` (days). As convenience, 436 | it is also possible to specify the delay as an integer, which will be 437 | interpreted as delay in seconds. 438 | :param timeout: The HTTP timeout value to use while calling the destination URL. 439 | When a timeout is specified, it will be used instead of the maximum timeout 440 | value permitted by the QStash plan. It is useful in scenarios, where a message 441 | should be delivered with a shorter timeout. 442 | :param schedule_id: Schedule id to use. This can be used to update the settings 443 | of an existing schedule. 444 | :param queue: Name of the queue which the scheduled messages will be enqueued. 445 | :param flow_control: Settings for controlling the number of active requests, 446 | as well as the rate of requests with the same flow control key. 447 | :param label: Assign a label to the request to filter logs with it later. 
448 | """ 449 | return self.create( 450 | destination=destination, 451 | cron=cron, 452 | body=json.dumps(body), 453 | content_type="application/json", 454 | method=method, 455 | headers=headers, 456 | callback_headers=callback_headers, 457 | failure_callback_headers=failure_callback_headers, 458 | retries=retries, 459 | retry_delay=retry_delay, 460 | callback=callback, 461 | failure_callback=failure_callback, 462 | delay=delay, 463 | timeout=timeout, 464 | schedule_id=schedule_id, 465 | queue=queue, 466 | flow_control=flow_control, 467 | label=label, 468 | ) 469 | 470 | def get(self, schedule_id: str) -> Schedule: 471 | """ 472 | Gets the schedule by its id. 473 | """ 474 | response = self._http.request( 475 | path=f"/v2/schedules/{schedule_id}", 476 | method="GET", 477 | ) 478 | 479 | return parse_schedule_response(response) 480 | 481 | def list(self) -> List[Schedule]: 482 | """ 483 | Lists all the schedules. 484 | """ 485 | response = self._http.request( 486 | path="/v2/schedules", 487 | method="GET", 488 | ) 489 | 490 | return [parse_schedule_response(r) for r in response] 491 | 492 | def delete(self, schedule_id: str) -> None: 493 | """ 494 | Deletes the schedule. 495 | """ 496 | self._http.request( 497 | path=f"/v2/schedules/{schedule_id}", 498 | method="DELETE", 499 | parse_response=False, 500 | ) 501 | 502 | def pause(self, schedule_id: str) -> None: 503 | """ 504 | Pauses the schedule. 505 | 506 | A paused schedule will not produce new messages until 507 | it is resumed. 508 | """ 509 | self._http.request( 510 | path=f"/v2/schedules/{schedule_id}/pause", 511 | method="PATCH", 512 | parse_response=False, 513 | ) 514 | 515 | def resume(self, schedule_id: str) -> None: 516 | """ 517 | Resumes the schedule. 
518 | """ 519 | self._http.request( 520 | path=f"/v2/schedules/{schedule_id}/resume", 521 | method="PATCH", 522 | parse_response=False, 523 | ) 524 | -------------------------------------------------------------------------------- /tests/asyncio/test_message.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import pytest 4 | 5 | from qstash import AsyncQStash 6 | from qstash.chat import openai 7 | from qstash.errors import QStashError 8 | from qstash.log import LogState 9 | from qstash.message import ( 10 | BatchJsonRequest, 11 | BatchRequest, 12 | BatchResponse, 13 | EnqueueResponse, 14 | PublishResponse, 15 | FlowControl, 16 | ) 17 | from tests import assert_eventually_async, OPENAI_API_KEY 18 | 19 | 20 | async def assert_delivered_eventually_async( 21 | async_client: AsyncQStash, msg_id: str 22 | ) -> None: 23 | async def assertion() -> None: 24 | logs = ( 25 | await async_client.log.list( 26 | filter={ 27 | "message_id": msg_id, 28 | "state": LogState.DELIVERED, 29 | } 30 | ) 31 | ).logs 32 | 33 | assert len(logs) == 1 34 | 35 | await assert_eventually_async( 36 | assertion, 37 | initial_delay=1.0, 38 | retry_delay=1.0, 39 | timeout=60.0, 40 | ) 41 | 42 | 43 | @pytest.mark.asyncio 44 | async def test_publish_to_url_async(async_client: AsyncQStash) -> None: 45 | res = await async_client.message.publish( 46 | body="test-body", 47 | method="GET", 48 | url="https://mock.httpstatus.io/200", 49 | headers={ 50 | "test-header": "test-value", 51 | }, 52 | ) 53 | 54 | assert isinstance(res, PublishResponse) 55 | assert len(res.message_id) > 0 56 | 57 | await assert_delivered_eventually_async(async_client, res.message_id) 58 | 59 | 60 | @pytest.mark.asyncio 61 | async def test_publish_to_url_json_async(async_client: AsyncQStash) -> None: 62 | res = await async_client.message.publish_json( 63 | body={"ex_key": "ex_value"}, 64 | method="GET", 65 | url="https://mock.httpstatus.io/200", 66 | headers={ 
67 | "test-header": "test-value", 68 | }, 69 | ) 70 | 71 | assert isinstance(res, PublishResponse) 72 | assert len(res.message_id) > 0 73 | 74 | await assert_delivered_eventually_async(async_client, res.message_id) 75 | 76 | 77 | @pytest.mark.asyncio 78 | async def test_disallow_multiple_destinations_async(async_client: AsyncQStash) -> None: 79 | with pytest.raises(QStashError): 80 | await async_client.message.publish_json( 81 | method="GET", 82 | url="https://mock.httpstatus.io/200", 83 | url_group="test-url-group", 84 | ) 85 | 86 | with pytest.raises(QStashError): 87 | await async_client.message.publish_json( 88 | method="GET", 89 | url="https://mock.httpstatus.io/200", 90 | api={"name": "llm", "provider": openai(OPENAI_API_KEY)}, 91 | ) 92 | 93 | with pytest.raises(QStashError): 94 | await async_client.message.publish_json( 95 | url_group="test-url-group", 96 | api={"name": "llm", "provider": openai(OPENAI_API_KEY)}, 97 | ) 98 | 99 | 100 | @pytest.mark.asyncio 101 | async def test_batch_async(async_client: AsyncQStash) -> None: 102 | N = 3 103 | messages = [] 104 | for i in range(N): 105 | messages.append( 106 | BatchRequest( 107 | body=f"hi {i}", 108 | method="GET", 109 | url="https://mock.httpstatus.io/200", 110 | retries=0, 111 | headers={ 112 | f"test-header-{i}": f"test-value-{i}", 113 | "content-type": "text/plain", 114 | }, 115 | ) 116 | ) 117 | 118 | res = await async_client.message.batch(messages) 119 | 120 | assert len(res) == N 121 | 122 | for r in res: 123 | assert isinstance(r, BatchResponse) 124 | assert len(r.message_id) > 0 125 | 126 | 127 | @pytest.mark.asyncio 128 | async def test_batch_json_async(async_client: AsyncQStash) -> None: 129 | N = 3 130 | messages = [] 131 | for i in range(N): 132 | messages.append( 133 | BatchJsonRequest( 134 | body={"hi": i}, 135 | method="GET", 136 | url="https://mock.httpstatus.io/200", 137 | retries=0, 138 | headers={ 139 | f"test-header-{i}": f"test-value-{i}", 140 | }, 141 | ) 142 | ) 143 | 144 | res = await 
async_client.message.batch_json(messages) 145 | 146 | assert len(res) == N 147 | 148 | for r in res: 149 | assert isinstance(r, BatchResponse) 150 | assert len(r.message_id) > 0 151 | 152 | 153 | @pytest.mark.asyncio 154 | async def test_publish_to_api_llm_async(async_client: AsyncQStash) -> None: 155 | res = await async_client.message.publish_json( 156 | api={"name": "llm", "provider": openai(OPENAI_API_KEY)}, 157 | body={ 158 | "model": "gpt-3.5-turbo", 159 | "messages": [ 160 | { 161 | "role": "user", 162 | "content": "just say hello", 163 | } 164 | ], 165 | }, 166 | callback="https://mock.httpstatus.io/200", 167 | ) 168 | 169 | assert isinstance(res, PublishResponse) 170 | assert len(res.message_id) > 0 171 | 172 | await assert_delivered_eventually_async(async_client, res.message_id) 173 | 174 | 175 | @pytest.mark.asyncio 176 | async def test_batch_api_llm_async(async_client: AsyncQStash) -> None: 177 | res = await async_client.message.batch_json( 178 | [ 179 | { 180 | "api": {"name": "llm", "provider": openai(OPENAI_API_KEY)}, 181 | "body": { 182 | "model": "gpt-3.5-turbo", 183 | "messages": [ 184 | { 185 | "role": "user", 186 | "content": "just say hello", 187 | } 188 | ], 189 | }, 190 | "callback": "https://mock.httpstatus.io/200", 191 | }, 192 | ] 193 | ) 194 | 195 | assert len(res) == 1 196 | 197 | assert isinstance(res[0], BatchResponse) 198 | assert len(res[0].message_id) > 0 199 | 200 | await assert_delivered_eventually_async(async_client, res[0].message_id) 201 | 202 | 203 | @pytest.mark.asyncio 204 | async def test_enqueue_async( 205 | async_client: AsyncQStash, 206 | cleanup_queue_async: Callable[[AsyncQStash, str], None], 207 | ) -> None: 208 | name = "test_queue" 209 | cleanup_queue_async(async_client, name) 210 | 211 | res = await async_client.message.enqueue( 212 | queue=name, 213 | body="test-body", 214 | method="GET", 215 | url="https://mock.httpstatus.io/200", 216 | headers={ 217 | "test-header": "test-value", 218 | }, 219 | ) 220 | 221 | 
    assert isinstance(res, EnqueueResponse)

    assert len(res.message_id) > 0


@pytest.mark.asyncio
async def test_enqueue_json_async(
    async_client: AsyncQStash,
    cleanup_queue_async: Callable[[AsyncQStash, str], None],
) -> None:
    """Enqueue a JSON body with custom method/headers; expect a message id back."""
    name = "test_queue"
    # Register the queue for deletion after the test run.
    cleanup_queue_async(async_client, name)

    res = await async_client.message.enqueue_json(
        queue=name,
        body={"test": "body"},
        method="GET",
        url="https://mock.httpstatus.io/200",
        headers={
            "test-header": "test-value",
        },
    )

    assert isinstance(res, EnqueueResponse)

    assert len(res.message_id) > 0


@pytest.mark.asyncio
async def test_enqueue_api_llm_async(
    async_client: AsyncQStash,
    cleanup_queue_async: Callable[[AsyncQStash, str], None],
) -> None:
    """Enqueue a chat-completion request to the LLM api destination."""
    name = "test_queue"
    cleanup_queue_async(async_client, name)

    res = await async_client.message.enqueue_json(
        queue=name,
        body={
            "model": "gpt-3.5-turbo",
            "messages": [
                {
                    "role": "user",
                    "content": "just say hello",
                }
            ],
        },
        api={"name": "llm", "provider": openai(OPENAI_API_KEY)},
        callback="https://mock.httpstatus.io/200",
    )

    assert isinstance(res, EnqueueResponse)

    assert len(res.message_id) > 0


@pytest.mark.asyncio
async def test_publish_to_url_group_async(async_client: AsyncQStash) -> None:
    """Publishing to a url group returns one response per endpoint in the group."""
    name = "python_url_group"
    # Start from a clean slate so the group has exactly the two endpoints below.
    await async_client.url_group.delete(name)

    await async_client.url_group.upsert_endpoints(
        url_group=name,
        endpoints=[
            {"url": "https://mock.httpstatus.io/200"},
            {"url": "https://mock.httpstatus.io/201"},
        ],
    )

    res = await async_client.message.publish(
        body="test-body",
        url_group=name,
    )

    assert isinstance(res, list)
    assert len(res) == 2

    await assert_delivered_eventually_async(async_client, res[0].message_id)
    await assert_delivered_eventually_async(async_client, res[1].message_id)


@pytest.mark.asyncio
async def test_timeout_async(async_client: AsyncQStash) -> None:
    """A per-message timeout is accepted and the message still gets delivered."""
    res = await async_client.message.publish_json(
        body={"ex_key": "ex_value"},
        method="GET",
        url="https://mock.httpstatus.io/200",
        timeout=90,
    )

    assert isinstance(res, PublishResponse)
    assert len(res.message_id) > 0

    await assert_delivered_eventually_async(async_client, res.message_id)


@pytest.mark.asyncio
async def test_cancel_many_async(async_client: AsyncQStash) -> None:
    """cancel_many reports exactly how many of the given message ids it cancelled."""
    # A 404 destination with retries keeps the messages alive long enough to cancel.
    res0 = await async_client.message.publish(
        url="http://httpstat.us/404",
        retries=3,
    )

    assert isinstance(res0, PublishResponse)

    res1 = await async_client.message.publish(
        url="http://httpstat.us/404",
        retries=3,
    )

    assert isinstance(res1, PublishResponse)

    cancelled = await async_client.message.cancel_many(
        [res0.message_id, res1.message_id]
    )

    assert cancelled == 2


@pytest.mark.asyncio
async def test_cancel_all_async(async_client: AsyncQStash) -> None:
    """cancel_all cancels at least the two in-flight messages created here."""
    res0 = await async_client.message.publish(
        url="http://httpstat.us/404",
        retries=3,
    )

    assert isinstance(res0, PublishResponse)

    res1 = await async_client.message.publish(
        url="http://httpstat.us/404",
        retries=3,
    )

    assert isinstance(res1, PublishResponse)

    cancelled = await async_client.message.cancel_all()

    # ">=" because other concurrently-running tests may have pending messages too.
    assert cancelled >= 2


@pytest.mark.asyncio
async def test_publish_to_api_llm_custom_provider_async(
    async_client: AsyncQStash,
) -> None:
    """Publish to the LLM api with an explicit provider and verify delivery."""
    res = await async_client.message.publish_json(
        api={"name": "llm", "provider": openai(OPENAI_API_KEY)},
        body={
            "model": "gpt-3.5-turbo",
            "messages": [
                {
                    "role": "user",
                    "content": "just say hello",
                }
            ],
        },
        callback="https://mock.httpstatus.io/200",
    )

    assert isinstance(res, PublishResponse)
    assert len(res.message_id) > 0

    await assert_delivered_eventually_async(async_client, res.message_id)


@pytest.mark.asyncio
async def test_enqueue_api_llm_custom_provider_async(
    async_client: AsyncQStash,
    # NOTE(review): this async test takes the `cleanup_queue` fixture while the
    # other async tests use `cleanup_queue_async` — confirm this is intentional
    # and that the sync fixture handles an AsyncQStash client.
    cleanup_queue: Callable[[AsyncQStash, str], None],
) -> None:
    """Enqueue a chat-completion request to the LLM api with an explicit provider."""
    name = "test_queue"
    cleanup_queue(async_client, name)

    res = await async_client.message.enqueue_json(
        queue=name,
        body={
            "model": "gpt-3.5-turbo",
            "messages": [
                {
                    "role": "user",
                    "content": "just say hello",
                }
            ],
        },
        api={"name": "llm", "provider": openai(OPENAI_API_KEY)},
        callback="https://mock.httpstatus.io/200",
    )

    assert isinstance(res, EnqueueResponse)

    assert len(res.message_id) > 0


@pytest.mark.asyncio
async def test_publish_with_flow_control_async(
    async_client: AsyncQStash,
) -> None:
    """Flow-control settings round-trip onto the stored message."""
    result = await async_client.message.publish_json(
        body={"ex_key": "ex_value"},
        # The slow endpoint keeps the message retrievable while we inspect it.
        url="https://mock.httpstatus.io/200?sleep=30000",
        flow_control=FlowControl(key="flow-key", parallelism=3, rate=4, period=2),
    )

    assert isinstance(result, PublishResponse)
    message = await async_client.message.get(result.message_id)

    flow_control = message.flow_control
    assert flow_control is not None
    assert flow_control.key == "flow-key"
    assert flow_control.parallelism == 3
    assert flow_control.rate == 4
    assert flow_control.period == 2


@pytest.mark.asyncio
async def test_batch_with_flow_control_async(
    async_client: AsyncQStash,
) -> None:
    """Per-message flow control in a batch, including duration-string periods."""
    result = await async_client.message.batch_json(
        [
            BatchJsonRequest(
                body={"ex_key": "ex_value"},
                url="https://mock.httpstatus.io/200?sleep=30000",
                flow_control=FlowControl(key="flow-key-1", rate=1),
            ),
            BatchJsonRequest(
                body={"ex_key": "ex_value"},
                url="https://mock.httpstatus.io/200?sleep=30000",
                flow_control=FlowControl(key="flow-key-2", rate=23, period="1h30m3s"),
            ),
            BatchJsonRequest(
                body={"ex_key": "ex_value"},
                url="https://mock.httpstatus.io/200?sleep=30000",
                flow_control=FlowControl(key="flow-key-3", parallelism=5),
            ),
        ]
    )

    assert isinstance(result[0], BatchResponse)
    message1 = await async_client.message.get(result[0].message_id)

    # Unset fields come back as None; an omitted period defaults to 1 (second).
    flow_control1 = message1.flow_control
    assert flow_control1 is not None
    assert flow_control1.key == "flow-key-1"
    assert flow_control1.parallelism is None
    assert flow_control1.rate == 1
    assert flow_control1.period == 1

    assert isinstance(result[1], BatchResponse)
    message2 = await async_client.message.get(result[1].message_id)

    flow_control2 = message2.flow_control
    assert flow_control2 is not None
    assert flow_control2.key == "flow-key-2"
    assert flow_control2.parallelism is None
    assert flow_control2.rate == 23
    # "1h30m3s" is normalized server-side to seconds: 3600 + 1800 + 3.
    assert flow_control2.period == 5403

    assert isinstance(result[2], BatchResponse)
    message3 = await async_client.message.get(result[2].message_id)

    flow_control3 = message3.flow_control
    assert flow_control3 is not None
    assert flow_control3.key == "flow-key-3"
    assert flow_control3.parallelism == 5
    assert flow_control3.rate is None
    assert flow_control3.period == 1


@pytest.mark.asyncio
async def test_publish_with_label_async(async_client: AsyncQStash) -> None:
    """A label set on publish is readable back from the stored message."""
    res = await async_client.message.publish(
        body="test-body-with-label-async",
        url="https://mock.httpstatus.io/200",
        label="test-async-publish-label",
    )

    assert isinstance(res, PublishResponse)
    assert len(res.message_id) > 0

    # Verify the message has the label
    message = await async_client.message.get(res.message_id)
    assert message.label == "test-async-publish-label"


@pytest.mark.asyncio
async def test_publish_json_with_label_async(async_client: AsyncQStash) -> None:
    """The `label` option is independent of any "label" key inside the JSON body."""
    res = await async_client.message.publish_json(
        body={"test": "async-data", "label": "json-async-test"},
        url="https://mock.httpstatus.io/200",
        label="test-async-json-label",
    )

    assert isinstance(res, PublishResponse)
    assert len(res.message_id) > 0

    # Verify the message has the label
    message = await async_client.message.get(res.message_id)
    assert message.label == "test-async-json-label"


@pytest.mark.asyncio
async def test_enqueue_with_label_async(
    async_client: AsyncQStash,
    cleanup_queue_async: Callable[[AsyncQStash, str], None],
) -> None:
    """A label set on enqueue is readable back from the stored message."""
    queue_name = "test_async_queue_with_label"
    cleanup_queue_async(async_client, queue_name)

    res = await async_client.message.enqueue(
        queue=queue_name,
        body="test-async-enqueue-body",
        url="https://mock.httpstatus.io/200",
        label="test-async-enqueue-label",
    )

    assert isinstance(res, EnqueueResponse)
    assert len(res.message_id) > 0

    # Verify the message has the label
    message = await async_client.message.get(res.message_id)
    assert message.label == "test-async-enqueue-label"


@pytest.mark.asyncio
async def test_enqueue_json_with_label_async(
    async_client: AsyncQStash,
    cleanup_queue_async: Callable[[AsyncQStash, str], None],
) -> None:
    """A label set on enqueue_json is readable back from the stored message."""
    queue_name = "test_async_queue_json_label"
    cleanup_queue_async(async_client, queue_name)

    res = await async_client.message.enqueue_json(
        queue=queue_name,
        body={"enqueue": "async-json-data"},
        url="https://mock.httpstatus.io/200",
        label="test-async-enqueue-json-label",
    )

    assert isinstance(res, EnqueueResponse)
    assert len(res.message_id) > 0

    # Verify the message has the label
    message = await async_client.message.get(res.message_id)
    assert message.label == "test-async-enqueue-json-label"


@pytest.mark.asyncio
async def test_batch_with_label_async(async_client: AsyncQStash) -> None:
    """Each message in a batch carries its own label."""
    result = await async_client.message.batch(
        [
            BatchRequest(
                body="async-batch-test-1",
                url="https://mock.httpstatus.io/200",
                label="async-batch-label-1",
            ),
            BatchRequest(
                body="async-batch-test-2",
                url="https://mock.httpstatus.io/201",
                label="async-batch-label-2",
            ),
        ]
    )

    assert len(result) == 2
    assert isinstance(result[0], BatchResponse)
    assert isinstance(result[1], BatchResponse)

    # Verify the messages have the correct labels
    message1 = await async_client.message.get(result[0].message_id)
    message2 = await async_client.message.get(result[1].message_id)

    assert message1.label == "async-batch-label-1"
    assert message2.label == "async-batch-label-2"


@pytest.mark.asyncio
async def test_batch_json_with_label_async(async_client: AsyncQStash) -> None:
    """Each message in a JSON batch carries its own label."""
    result = await async_client.message.batch_json(
        [
            BatchJsonRequest(
                body={"batch": "async-json-1"},
                url="https://mock.httpstatus.io/200",
                label="async-batch-json-label-1",
            ),
            BatchJsonRequest(
                body={"batch": "async-json-2"},
                url="https://mock.httpstatus.io/201",
                label="async-batch-json-label-2",
            ),
        ]
    )

    assert len(result) == 2
    assert isinstance(result[0], BatchResponse)
    assert isinstance(result[1], BatchResponse)

    # Verify the messages have the correct labels
    message1 = await async_client.message.get(result[0].message_id)
    message2 = await async_client.message.get(result[1].message_id)

    assert message1.label == "async-batch-json-label-1"
    assert message2.label == "async-batch-json-label-2"


@pytest.mark.asyncio
async def test_log_filtering_by_label_async(async_client: AsyncQStash) -> None:
    """Logs can be filtered by label and only matching entries are returned."""
    # Publish a message with a specific label
    res = await async_client.message.publish(
        body="test-async-log-filtering",
        url="https://mock.httpstatus.io/200",
        label="async-log-filter-test",
    )

    assert isinstance(res, PublishResponse)

    # Wait for message delivery and then check logs with label filter
    async def assertion() -> None:
        logs = (
            await async_client.log.list(
                filter={
                    "label": "async-log-filter-test",
                }
            )
        ).logs

        # Should find at least one log entry with our label
        assert len(logs) > 0
        # Verify that all returned logs have the expected label
        for log in logs:
            assert log.label == "async-log-filter-test"

    # Logs are written asynchronously, so poll until the assertion holds.
    await assert_eventually_async(
        assertion,
        initial_delay=1.0,
        retry_delay=1.0,
        timeout=30.0,
    )


# -------------------- qstash/asyncio/message.py --------------------

import json
from typing import Any, Dict, List, Optional, Union

from qstash.asyncio.http import AsyncHttpClient
from qstash.http import HttpMethod

# Request/response models and header/body helpers are shared with the sync
# client; this module only supplies the async transport around them.
from qstash.message import (
    ApiT,
    FlowControl,
    BatchJsonRequest,
    BatchRequest,
    BatchResponse,
    BatchUrlGroupResponse,
    EnqueueResponse,
    EnqueueUrlGroupResponse,
    Message,
    PublishResponse,
    PublishUrlGroupResponse,
    convert_to_batch_messages,
    get_destination,
    parse_batch_response,
    parse_enqueue_response,
    parse_message_response,
    parse_publish_response,
    prepare_batch_message_body,
    prepare_headers,
)


class AsyncMessageApi:
    """Async message operations: publish, enqueue, batch, get, and cancel."""

    def __init__(self, http: AsyncHttpClient):
        # Shared async HTTP client; owns auth, retries, and base url.
        self._http = http

    async def publish(
        self,
        *,
        url: Optional[str] = None,
        url_group: Optional[str] = None,
        api: Optional[ApiT] = None,
        body: Optional[Union[str, bytes]] = None,
        content_type: Optional[str] = None,
        method: Optional[HttpMethod] = None,
        headers: Optional[Dict[str, str]] = None,
        callback_headers: Optional[Dict[str, str]] = None,
        failure_callback_headers: Optional[Dict[str, str]] = None,
        retries: Optional[int] = None,
        retry_delay: Optional[str] = None,
        callback: Optional[str] = None,
        failure_callback: Optional[str] = None,
        delay: Optional[Union[str, int]] = None,
        not_before: Optional[int] = None,
        deduplication_id: Optional[str] = None,
        content_based_deduplication: Optional[bool] = None,
        timeout: Optional[Union[str, int]] = None,
        flow_control: Optional[FlowControl] = None,
        label: Optional[str] = None,
    ) -> Union[PublishResponse, List[PublishUrlGroupResponse]]:
        """
        Publishes a message to QStash.

        If the destination is a `url` or an `api`, a `PublishResponse`
        is returned. If the destination is a `url_group`, a list of
        `PublishUrlGroupResponse`s is returned, one for each url in the
        url group.

        :param url: Url to send the message to.
        :param url_group: Url group to send the message to.
        :param api: Api to send the message to.
        :param body: The raw request message body passed to the destination as is.
        :param content_type: MIME type of the message.
        :param method: The HTTP method to use when sending a webhook to your API.
        :param headers: Headers to forward along with the message.
        :param callback_headers: Headers to forward along with the callback message.
        :param failure_callback_headers: Headers to forward along with the
            failure callback message.
        :param retries: How often this message should be retried in case the
            destination API is not available.
        :param retry_delay: Delay between retries, in milliseconds. Defaults to
            exponential backoff. May be a mathematical expression over the
            variable `retried` (current attempt count, starting at 0) using
            `pow`, `sqrt`, `abs`, `exp`, `floor`, `ceil`, `round`, `min`, `max`,
            e.g. `1000 * (1 + retried)` or `max(10, pow(2, retried))`.
            See https://upstash.com/docs/qstash/features/retry.
        :param callback: A callback url that will be called after each attempt.
        :param failure_callback: A failure callback url that will be called when
            all the defined retries are exhausted.
        :param delay: Delay the message delivery: a duration string like `10s`
            (`s` seconds, `m` minutes, `h` hours, `d` days), or an integer
            interpreted as seconds.
        :param not_before: Delay the message until a unix timestamp in seconds,
            based on the UTC timezone.
        :param deduplication_id: Id to use while deduplicating messages.
        :param content_based_deduplication: Automatically deduplicate messages
            based on their content.
        :param timeout: The HTTP timeout to use while calling the destination
            URL, instead of the maximum permitted by the QStash plan.
        :param flow_control: Settings for controlling the number of active
            requests, as well as the rate of requests with the same flow
            control key.
        :param label: Assign a label to the request to filter logs with it later.
        """
        # get_destination may add api-specific headers (e.g. provider auth),
        # so make sure we hold a mutable dict before calling it.
        headers = headers or {}
        destination = get_destination(
            url=url,
            url_group=url_group,
            api=api,
            headers=headers,
        )

        # All delivery options travel as Upstash-* request headers.
        req_headers = prepare_headers(
            content_type=content_type,
            method=method,
            headers=headers,
            callback_headers=callback_headers,
            failure_callback_headers=failure_callback_headers,
            retries=retries,
            retry_delay=retry_delay,
            callback=callback,
            failure_callback=failure_callback,
            delay=delay,
            not_before=not_before,
            deduplication_id=deduplication_id,
            content_based_deduplication=content_based_deduplication,
            timeout=timeout,
            flow_control=flow_control,
            label=label,
        )

        response = await self._http.request(
            path=f"/v2/publish/{destination}",
            method="POST",
            headers=req_headers,
            body=body,
        )

        return parse_publish_response(response)

    async def publish_json(
        self,
        *,
        url: Optional[str] = None,
        url_group: Optional[str] = None,
        api: Optional[ApiT] = None,
        body: Optional[Any] = None,
        method: Optional[HttpMethod] = None,
        headers: Optional[Dict[str, str]] = None,
        callback_headers: Optional[Dict[str, str]] = None,
        failure_callback_headers: Optional[Dict[str, str]] = None,
        retries: Optional[int] = None,
        retry_delay: Optional[str] = None,
        callback: Optional[str] = None,
        failure_callback: Optional[str] = None,
        delay: Optional[Union[str, int]] = None,
        not_before: Optional[int] = None,
        deduplication_id: Optional[str] = None,
        content_based_deduplication: Optional[bool] = None,
        timeout: Optional[Union[str, int]] = None,
        flow_control: Optional[FlowControl] = None,
        label: Optional[str] = None,
    ) -> Union[PublishResponse, List[PublishUrlGroupResponse]]:
        """
        Publishes a message to QStash, automatically serializing the body
        as a JSON string and setting content type to `application/json`.

        Same destinations, options, and return types as `publish`; the only
        difference is that `body` may be any JSON-serializable object.
        """
        return await self.publish(
            url=url,
            url_group=url_group,
            api=api,
            body=json.dumps(body),
            content_type="application/json",
            method=method,
            headers=headers,
            callback_headers=callback_headers,
            failure_callback_headers=failure_callback_headers,
            retries=retries,
            retry_delay=retry_delay,
            callback=callback,
            failure_callback=failure_callback,
            delay=delay,
            not_before=not_before,
            deduplication_id=deduplication_id,
            content_based_deduplication=content_based_deduplication,
            timeout=timeout,
            flow_control=flow_control,
            label=label,
        )

    async def enqueue(
        self,
        *,
        queue: str,
        url: Optional[str] = None,
        url_group: Optional[str] = None,
        api: Optional[ApiT] = None,
        body: Optional[Union[str, bytes]] = None,
        content_type: Optional[str] = None,
        method: Optional[HttpMethod] = None,
        headers: Optional[Dict[str, str]] = None,
        callback_headers: Optional[Dict[str, str]] = None,
        failure_callback_headers: Optional[Dict[str, str]] = None,
        retries: Optional[int] = None,
        retry_delay: Optional[str] = None,
        callback: Optional[str] = None,
        failure_callback: Optional[str] = None,
        deduplication_id: Optional[str] = None,
        content_based_deduplication: Optional[bool] = None,
        timeout: Optional[Union[str, int]] = None,
        label: Optional[str] = None,
    ) -> Union[EnqueueResponse, List[EnqueueUrlGroupResponse]]:
        """
        Enqueues a message, after creating the queue if it does not exist.

        If the destination is a `url` or an `api`, an `EnqueueResponse`
        is returned. If the destination is a `url_group`, a list of
        `EnqueueUrlGroupResponse`s is returned, one for each url in the
        url group.

        :param queue: The name of the queue.

        The remaining options match `publish`, except that enqueue does not
        accept `delay`, `not_before`, or `flow_control` (ordering and pacing
        are governed by the queue itself).
        """
        headers = headers or {}
        destination = get_destination(
            url=url,
            url_group=url_group,
            api=api,
            headers=headers,
        )

        # delay/not_before/flow_control are intentionally None: they do not
        # apply to queued delivery.
        req_headers = prepare_headers(
            content_type=content_type,
            method=method,
            headers=headers,
            callback_headers=callback_headers,
            failure_callback_headers=failure_callback_headers,
            retries=retries,
            retry_delay=retry_delay,
            callback=callback,
            failure_callback=failure_callback,
            delay=None,
            not_before=None,
            deduplication_id=deduplication_id,
            content_based_deduplication=content_based_deduplication,
            timeout=timeout,
            flow_control=None,
            label=label,
        )

        response = await self._http.request(
            path=f"/v2/enqueue/{queue}/{destination}",
            method="POST",
            headers=req_headers,
            body=body,
        )

        return parse_enqueue_response(response)

    async def enqueue_json(
        self,
        *,
        queue: str,
        url: Optional[str] = None,
        url_group: Optional[str] = None,
        api: Optional[ApiT] = None,
        body: Optional[Any] = None,
        method: Optional[HttpMethod] = None,
        headers: Optional[Dict[str, str]] = None,
        callback_headers: Optional[Dict[str, str]] = None,
        failure_callback_headers: Optional[Dict[str, str]] = None,
        retries: Optional[int] = None,
        retry_delay: Optional[str] = None,
        callback: Optional[str] = None,
        failure_callback: Optional[str] = None,
        deduplication_id: Optional[str] = None,
        content_based_deduplication: Optional[bool] = None,
        timeout: Optional[Union[str, int]] = None,
        label: Optional[str] = None,
    ) -> Union[EnqueueResponse, List[EnqueueUrlGroupResponse]]:
        """
        Enqueues a message, after creating the queue if it does not exist,
        automatically serializing the body as a JSON string and setting
        content type to `application/json`.

        Same destinations, options, and return types as `enqueue`; the only
        difference is that `body` may be any JSON-serializable object.
        """
        return await self.enqueue(
            queue=queue,
            url=url,
            url_group=url_group,
            api=api,
            body=json.dumps(body),
            content_type="application/json",
            method=method,
            headers=headers,
            callback_headers=callback_headers,
            failure_callback_headers=failure_callback_headers,
            retries=retries,
            retry_delay=retry_delay,
            callback=callback,
            failure_callback=failure_callback,
            deduplication_id=deduplication_id,
            content_based_deduplication=content_based_deduplication,
            timeout=timeout,
            label=label,
        )

    async def batch(
        self, messages: List[BatchRequest]
    ) -> List[Union[BatchResponse, List[BatchUrlGroupResponse]]]:
        """
        Publishes or enqueues multiple messages in a single request.

        Returns a list of responses, one per message in the batch: a
        `BatchResponse` for a url/api destination, or a list of
        `BatchUrlGroupResponse`s (one per url) for a url group destination.
        """
        body = prepare_batch_message_body(messages)

        response = await self._http.request(
            path="/v2/batch",
            body=body,
            headers={"Content-Type": "application/json"},
            method="POST",
        )

        return parse_batch_response(response)

    async def batch_json(
        self, messages: List[BatchJsonRequest]
    ) -> List[Union[BatchResponse, List[BatchUrlGroupResponse]]]:
        """
        Publishes or enqueues multiple messages in a single request,
        automatically serializing the message bodies as JSON strings and
        setting content type to `application/json`.

        Return shape matches `batch`.
        """
        # Convert JSON requests into raw BatchRequests, then reuse `batch`.
        batch_messages = convert_to_batch_messages(messages)
        return await self.batch(batch_messages)

    async def get(self, message_id: str) -> Message:
        """
        Gets the message by its id.
        """
        response = await self._http.request(
            path=f"/v2/messages/{message_id}",
            method="GET",
        )

        return parse_message_response(response)

    async def cancel(self, message_id: str) -> None:
        """
        Cancels delivery of an existing message.

        Cancelling a message removes it from QStash and stops future
        delivery. If a message is already in flight to your API, it might
        be too late to cancel.
        """
        # DELETE returns no useful body, so skip response parsing.
        await self._http.request(
            path=f"/v2/messages/{message_id}",
            method="DELETE",
            parse_response=False,
        )

    async def cancel_many(self, message_ids: List[str]) -> int:
        """
        Cancels delivery of the given existing messages.

        Same caveats as `cancel`. Returns how many of the messages were
        actually cancelled.
        """
        body = json.dumps({"messageIds": message_ids})

        response = await self._http.request(
            path="/v2/messages",
            method="DELETE",
            headers={"Content-Type": "application/json"},
            body=body,
        )

        return response["cancelled"]  # type:ignore[no-any-return]

    async def cancel_all(self) -> int:
        """
        Cancels delivery of all existing messages.

        Same caveats as `cancel`. Returns how many messages were cancelled.
        """
        # No body means "cancel everything" on this endpoint.
        response = await self._http.request(
            path="/v2/messages",
            method="DELETE",
        )

        return response["cancelled"]  # type:ignore[no-any-return]