20 | This extension allows you to mirror resources from real AWS accounts into your LocalStack instance, thereby "bridging the gap" between local and remote cloud resources.
21 |
22 | Some common use cases are: developing a local Lambda function that accesses a remote DynamoDB table; running a local Athena SQL query in LocalStack against files in a real S3 bucket in AWS; or seeding a local Terraform script with SSM parameters from a real AWS account.
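For example, a local Lambda function can talk to a remote DynamoDB table through the proxy using plain `boto3` calls. A minimal, hypothetical sketch (the table and key names are illustrative):

```python
import boto3


def handler(event, context):
    # With the proxy forwarding DynamoDB requests, this call is served by the
    # real AWS table even though the function runs inside LocalStack.
    dynamodb = boto3.client("dynamodb")
    result = dynamodb.get_item(
        TableName="my-remote-table",  # illustrative table name
        Key={"id": {"S": event.get("id", "1")}},
    )
    return result.get("Item")
```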
23 |
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/aws-replicator/aws_replicator/shared/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/aws-replicator/aws_replicator/shared/__init__.py
--------------------------------------------------------------------------------
/aws-replicator/aws_replicator/shared/constants.py:
--------------------------------------------------------------------------------
1 | # header name for the original request host name forwarded in the request to the target proxy handler
2 | HEADER_HOST_ORIGINAL = "x-ls-host-original"
3 |
--------------------------------------------------------------------------------
/aws-replicator/aws_replicator/shared/models.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from abc import ABC, abstractmethod
3 | from typing import Any, Dict, List, Optional, TypedDict, Union
4 |
5 | LOG = logging.getLogger(__name__)
6 |
7 |
8 | class ReplicateStateRequest(TypedDict):
9 | """
10 | Represents a request sent from the CLI to the extension request
11 | handler to inject additional resource state properties.
12 | Using upper-case property names, to stay in line with CloudFormation/CloudControl resource models.
13 | """
14 |
15 | # resource type name (e.g., "AWS::S3::Bucket")
16 | Type: str
17 | # identifier of the resource
18 | PhysicalResourceId: Optional[str]
19 | # resource properties
20 | Properties: Dict[str, Any]
21 |
22 |
23 | class ResourceReplicator(ABC):
24 | """
25 | Interface for resource replicator, to effect the creation of a cloned resource inside LocalStack.
26 | This interface has a client-side and a server-side implementation.
27 | """
28 |
29 | @abstractmethod
30 | def create(self, resource: Dict):
31 | """Create the resource specified via the given resource dict."""
32 |
33 | @abstractmethod
34 | def create_all(self):
35 | """Scrape and replicate all resources from the source AWS account into LocalStack."""
36 |
37 |
38 | class ProxyServiceConfig(TypedDict, total=False):
39 |     # regex (or list of regexes) identifying resources for which requests should be proxied
40 | resources: Union[str, List[str]]
41 | # list of operation names (regexes) that should be proxied
42 | operations: List[str]
43 | # whether only read requests should be forwarded
44 | read_only: bool
45 |
46 |
47 | class ProxyConfig(TypedDict, total=False):
48 | # maps service name to service proxy configs
49 | services: Dict[str, ProxyServiceConfig]
50 | # bind host for the proxy (defaults to 127.0.0.1)
51 | bind_host: str
52 |
53 |
54 | class ProxyInstance(TypedDict):
55 | """Represents a proxy instance"""
56 |
57 | # port of the proxy on the host
58 | port: int
59 | # configuration for the proxy
60 | config: ProxyConfig
61 |
62 |
63 | class AddProxyRequest(ProxyInstance):
64 | """
65 | Represents a request to register a new local proxy instance with the extension inside LocalStack.
66 | """
67 |
68 | env_vars: dict
69 |
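# Hedged usage sketch (not part of the module above): how these TypedDicts compose
# into a proxy registration request. The port, resource pattern, and env vars below
# are illustrative values only.
config = ProxyConfig(
    services={"s3": ProxyServiceConfig(resources=[".*:my-bucket"], read_only=True)},
    bind_host="127.0.0.1",
)
request = AddProxyRequest(port=4320, config=config, env_vars={})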
--------------------------------------------------------------------------------
/aws-replicator/aws_replicator/shared/utils.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable, Dict, Optional
2 |
3 |
4 | def list_all_resources(
5 | page_function: Callable[[dict], Any],
6 | last_token_attr_name: str,
7 | list_attr_name: str,
8 | next_token_attr_name: Optional[str] = None,
9 | max_pages=None,
10 | ) -> list:
11 | if next_token_attr_name is None:
12 | next_token_attr_name = last_token_attr_name
13 |
14 | result = None
15 | collected_items = []
16 | last_evaluated_token = None
17 |
18 | pages = 0
19 | while not result or last_evaluated_token:
20 | if max_pages and pages >= max_pages:
21 | break
22 | kwargs = {next_token_attr_name: last_evaluated_token} if last_evaluated_token else {}
23 | result = page_function(kwargs)
24 | last_evaluated_token = result.get(last_token_attr_name)
25 | collected_items += result.get(list_attr_name, [])
26 | pages += 1
27 |
28 | return collected_items
29 |
30 |
31 | def get_resource_type(resource: Dict) -> str:
32 | return resource.get("Type") or resource.get("TypeName")
33 |
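# Hedged usage sketch (not part of the module above): paginate through all DynamoDB
# tables with boto3. The attribute names follow the DynamoDB ListTables API, where
# the response token ("LastEvaluatedTableName") differs from the request parameter
# ("ExclusiveStartTableName").
import boto3

dynamodb = boto3.client("dynamodb")
table_names = list_all_resources(
    page_function=lambda kwargs: dynamodb.list_tables(**kwargs),
    last_token_attr_name="LastEvaluatedTableName",
    list_attr_name="TableNames",
    next_token_attr_name="ExclusiveStartTableName",
)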
--------------------------------------------------------------------------------
/aws-replicator/etc/aws-replicate-overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/aws-replicator/etc/aws-replicate-overview.png
--------------------------------------------------------------------------------
/aws-replicator/etc/proxy-settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/aws-replicator/etc/proxy-settings.png
--------------------------------------------------------------------------------
/aws-replicator/example/Makefile:
--------------------------------------------------------------------------------
1 | usage: ## Show this help
2 | @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/:.*##\s*/##/g' | awk -F'##' '{ printf "%-15s %s\n", $$1, $$2 }'
3 |
4 | test: ## Run the end-to-end test with a simple sample app
5 | echo "Deploying sample application ..."; \
6 | echo "Creating SQS queue in real AWS"; \
7 | aws sqs create-queue --queue-name test-queue1; \
8 | queueUrl=$$(aws sqs get-queue-url --queue-name test-queue1 | jq -r .QueueUrl); \
9 | echo "Starting AWS replicator proxy"; \
10 | (DEBUG=1 localstack aws proxy -s s3,sqs --host 0.0.0.0 & ); \
11 | echo "Deploying Terraform template locally"; \
12 | tflocal init; \
13 | tflocal apply -auto-approve; \
14 | echo "Putting a message to the queue in real AWS"; \
15 | aws sqs send-message --queue-url $$queueUrl --message-body '{"test":"foobar 123"}'; \
16 | echo "Waiting a bit for Lambda to be triggered by SQS message ..."; \
17 | sleep 7 # ; \
18 | # TODO: Lambda invocation currently failing in CI:
19 | # [lambda e4cbf96395d8b7d8a94596f96de9ef7d] time="2023-09-16T22:12:04Z" level=panic msg="Post
20 | # \"http://172.17.0.2:443/_localstack_lambda/e4cbf96395d8b7d8a94596f96de9ef7d/status/e4cbf96395d8b7d8a94596f96de9ef7d/ready\":
21 | # dial tcp 172.17.0.2:443: connect: connection refused" func=go.amzn.com/lambda/rapid.handleStart
22 | # file="/home/runner/work/lambda-runtime-init/lambda-runtime-init/lambda/rapid/start.go:473"
23 | # logStream=$$(awslocal logs describe-log-streams --log-group-name /aws/lambda/func1 | jq -r '.logStreams[0].logStreamName'); \
24 | # awslocal logs get-log-events --log-stream-name "$$logStream" --log-group-name /aws/lambda/func1 | grep "foobar 123"; \
25 | # exitCode=$$?; \
26 | # echo "Cleaning up ..."; \
27 | # aws sqs delete-queue --queue-url $$queueUrl; \
28 | # exit $$exitCode
29 |
30 | .PHONY: usage test
31 |
--------------------------------------------------------------------------------
/aws-replicator/example/README.md:
--------------------------------------------------------------------------------
1 | # AWS Proxy Example
2 |
3 | This simple example illustrates how to use the AWS proxy in this extension to transparently run API requests against real AWS.
4 |
5 | 1. First, make sure that the extension is installed and LocalStack is up and running
6 |
7 | 2. Open a new terminal, configure the AWS credentials of your real AWS account, then start the proxy to forward requests for S3 and SQS to real AWS:
8 | ```
9 | $ DEBUG=1 localstack aws proxy -s s3,sqs
10 | ```
11 |
12 | 3. In another terminal, again configure the credentials to point to real AWS, then create an SQS queue (alternatively you can create the queue via the AWS Web console):
13 | ```
14 | $ aws sqs create-queue --queue-name test-queue1
15 | ```
16 |
17 | 4. Use `tflocal` to deploy the sample Terraform script against LocalStack:
18 | ```
19 | $ tflocal init
20 | $ tflocal apply
21 | ```
22 |
23 | 5. Open the AWS console (or use the CLI) and send a new message to the `test-queue1` SQS queue; a scripted alternative is sketched at the end of this guide.
24 |
25 | 6. The last command should have triggered a Lambda function invocation in LocalStack, via the SQS event source mapping defined in the Terraform script. If we take a close look at the Lambda output, it should print the S3 buckets of the real AWS account (as S3 requests are also forwarded by the proxy).
26 | ```
27 | >START RequestId: 4692b634-ccf1-1e23-0cd1-8831ddf8c35f Version: $LATEST
28 | > [{'Name': 'my-bucket-1', ...}, {'Name': 'my-bucket-2', ...}]
29 | > END RequestId: 4692b634-ccf1-1e23-0cd1-8831ddf8c35f
30 | ```
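
If you prefer a script over the console for step 5, a minimal `boto3` sketch (the queue name matches the Terraform template; credentials and region are assumed to be configured for the real AWS account):

```python
import boto3

sqs = boto3.client("sqs")
queue_url = sqs.get_queue_url(QueueName="test-queue1")["QueueUrl"]
sqs.send_message(QueueUrl=queue_url, MessageBody='{"test":"foobar 123"}')
```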
--------------------------------------------------------------------------------
/aws-replicator/example/lambda.py:
--------------------------------------------------------------------------------
1 | import boto3
2 |
3 |
4 | def handler(event, context):
5 | s3 = boto3.client("s3")
6 | buckets = s3.list_buckets().get("Buckets")
7 | print("event:", event)
8 | print("buckets:", buckets)
9 | bucket_names = [b["Name"] for b in buckets]
10 | return {"buckets": bucket_names}
11 |
--------------------------------------------------------------------------------
/aws-replicator/example/main.tf:
--------------------------------------------------------------------------------
1 |
2 | resource "aws_lambda_function" "test" {
3 | function_name = "func1"
4 | role = "arn:aws:iam::000000000000:role/test-role"
5 |
6 | s3_bucket = "hot-reload"
7 | s3_key = path.cwd
8 |
9 | handler = "lambda.handler"
10 | runtime = "python3.8"
11 | }
12 |
13 | resource "aws_sqs_queue" "test" {
14 | name = "test-queue1"
15 | }
16 |
17 | resource "aws_lambda_event_source_mapping" "test" {
18 | event_source_arn = aws_sqs_queue.test.arn
19 | function_name = aws_lambda_function.test.arn
20 | }
21 |
--------------------------------------------------------------------------------
/aws-replicator/example/proxy_config.yml:
--------------------------------------------------------------------------------
1 | services:
2 | s3:
3 | resources:
4 | # list of ARNs of S3 buckets to proxy to real AWS
5 | - '.*:bucket1'
6 |
--------------------------------------------------------------------------------
/aws-replicator/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/aws-replicator/logo.png
--------------------------------------------------------------------------------
/aws-replicator/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line_length = 100
3 | include = '(aws_replicator|example|tests)/.*\.py$'
4 |
5 | [tool.isort]
6 | profile = 'black'
7 | line_length = 100
8 |
9 | [tool.flake8]
10 | max-line-length = 100
11 | ignore = 'E501'
12 | exclude = './setup.py,.venv*,dist,build'
13 |
--------------------------------------------------------------------------------
/aws-replicator/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-aws-replicator
3 | version = 0.1.24
4 | summary = LocalStack AWS Proxy Extension
5 | description = Proxy AWS resources into your LocalStack instance
6 | long_description = file: README.md
7 | long_description_content_type = text/markdown; charset=UTF-8
8 | url = https://github.com/localstack/localstack-extensions/tree/main/aws-replicator
9 | author = LocalStack Team
10 | author_email = info@localstack.cloud
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 | install_requires =
16 | # TODO: currently requires a version pin, see note in auth_proxy.py
17 | boto3>=1.26.151
18 | # TODO: currently requires a version pin, see note in auth_proxy.py
19 | botocore>=1.29.151
20 | flask
21 | localstack-client
22 | xmltodict
23 | # TODO: refactor the use of http2_server
24 | hypercorn
25 | h11
26 | quart
27 | # TODO: runtime dependencies below should be removed over time (required for some LS imports)
28 | boto
29 | cbor2
30 | flask-cors
31 | jsonpatch
32 | moto
33 | werkzeug
34 |
35 | [options.extras_require]
36 | test =
37 | apispec
38 | localstack-core
39 | localstack-ext
40 | openapi-spec-validator
41 | pyproject-flake8
42 | pytest
43 | pytest-httpserver
44 | rolo
45 |
46 | [options.package_data]
47 | aws_replicator =
48 | **/*.html
49 | **/*.js
50 | **/*.png
51 |
52 | [options.entry_points]
53 | localstack.extensions =
54 | aws-replicator = aws_replicator.server.extension:AwsReplicatorExtension
55 | localstack.plugins.cli =
56 | aws-replicator = aws_replicator.client.cli:AwsReplicatorPlugin
57 |
--------------------------------------------------------------------------------
/aws-replicator/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/aws-replicator/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/aws-replicator/tests/__init__.py
--------------------------------------------------------------------------------
/aws-replicator/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from localstack.testing.aws.util import (
3 | base_aws_client_factory,
4 | base_aws_session,
5 | base_testing_aws_client,
6 | )
7 |
8 | pytest_plugins = [
9 | "localstack.testing.pytest.fixtures",
10 | ]
11 |
12 |
13 | @pytest.fixture(scope="session")
14 | def aws_session():
15 | return base_aws_session()
16 |
17 |
18 | @pytest.fixture(scope="session")
19 | def aws_client_factory(aws_session):
20 | return base_aws_client_factory(aws_session)
21 |
22 |
23 | @pytest.fixture(scope="session")
24 | def aws_client(aws_client_factory):
25 | return base_testing_aws_client(aws_client_factory)
26 |
--------------------------------------------------------------------------------
/aws-replicator/tests/test_config.py:
--------------------------------------------------------------------------------
1 | from aws_replicator.server.aws_request_forwarder import AwsProxyHandler
2 | from aws_replicator.shared.models import ProxyServiceConfig
3 |
4 |
5 | def test_get_resource_names():
6 | service_config = ProxyServiceConfig(resources="")
7 | assert AwsProxyHandler._get_resource_names(service_config) == [".*"]
8 |
9 | service_config = ProxyServiceConfig(resources="foobar")
10 | assert AwsProxyHandler._get_resource_names(service_config) == ["foobar"]
11 |
12 | service_config = ProxyServiceConfig(resources=["foo", "bar"])
13 | assert AwsProxyHandler._get_resource_names(service_config) == ["foo", "bar"]
14 |
--------------------------------------------------------------------------------
/bin/generate-extension-table.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import configparser
4 | import glob
5 | import os
6 |
7 |
8 | def main():
9 | root_path = os.path.join(os.path.dirname(__file__), '..')
10 |
11 | distributions = []
12 | for match in glob.glob("**/setup.cfg", root_dir=root_path):
13 | cfg = configparser.ConfigParser()
14 | cfg.read(os.path.join(root_path, match))
15 |
16 | distributions.append(cfg['metadata'])
17 |
18 | print("| Extension | Install name | Version | Support status |")
19 | print("| --------- | ------------ | ------- | -------------- |")
20 |
21 | for metadata in sorted(distributions, key=lambda k: k['name']):
22 | display_name = metadata['summary'].removeprefix("LocalStack Extension: ")
23 | print(f"| [{display_name}]({metadata['url']}) | {metadata['name']} | {metadata['version']} | ? |")
24 |
25 |
26 | if __name__ == "__main__":
27 | main()
28 |
--------------------------------------------------------------------------------
/diagnosis-viewer/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | touch $(VENV_DIR)/bin/activate
12 |
13 | clean:
14 | rm -rf .venv/
15 | rm -rf build/
16 | rm -rf .eggs/
17 | rm -rf *.egg-info/
18 |
19 | install: venv
20 | $(VENV_RUN); python -m pip install -e .[dev]
21 |
22 | dist: venv
23 | $(VENV_RUN); python setup.py sdist bdist_wheel
24 |
25 | publish: clean-dist venv dist
26 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
27 |
28 | clean-dist: clean
29 | rm -rf dist/
30 |
31 | .PHONY: clean clean-dist dist install publish
32 |
--------------------------------------------------------------------------------
/diagnosis-viewer/README.md:
--------------------------------------------------------------------------------
1 | Diagnosis Viewer
2 | ===============================
3 | [Install LocalStack Extension](https://app.localstack.cloud/extensions/remote?url=git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-diagnosis-viewer&subdirectory=diagnosis-viewer)
4 |
5 | View the diagnostics endpoint directly in localstack
6 |
7 | ## Access Diagnosis Data
8 |
9 | The extension is a web UI for the diagnosis endpoint of LocalStack, which is enabled when LocalStack is started with `DEBUG=1` and is available at `localhost:4566/_localstack/diagnose` (e.g., `curl -s localhost:4566/_localstack/diagnose`).
10 | The web UI can then be reached at `http://localhost:4566/diapretty`.
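
A quick way to inspect the raw diagnose data that the viewer renders (a sketch using only the Python standard library; assumes LocalStack is running locally with `DEBUG=1`):

```python
import json
import urllib.request

# fetch the raw report served by the diagnose endpoint
with urllib.request.urlopen("http://localhost:4566/_localstack/diagnose") as resp:
    report = json.load(resp)

print(sorted(report.keys()))
```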
11 |
12 |
13 | ## Installation
14 |
15 | Install the extension by running:
16 |
17 | ```bash
18 | localstack extensions install localstack-extension-diagnosis-viewer
19 | ```
20 |
21 | ## Development
22 |
23 | ### Install local development version
24 |
25 | To install the extension into localstack in developer mode, you will need Python 3.10 and a virtual environment created in the extension project.
26 |
27 | In the extension project, simply run
28 |
29 | ```bash
30 | make install
31 | ```
32 |
33 | Then, to enable the extension for LocalStack, run
34 |
35 | ```bash
36 | localstack extensions dev enable .
37 | ```
38 |
39 | You can then start LocalStack with `EXTENSION_DEV_MODE=1` to load all enabled extensions.
40 | Make sure to also set `DEBUG=1` so the diagnose endpoint necessary to populate the report is loaded.
41 |
42 | ```bash
43 | EXTENSION_DEV_MODE=1 DEBUG=1 localstack start
44 | ```
45 |
--------------------------------------------------------------------------------
/diagnosis-viewer/diagnosis_viewer/__init__.py:
--------------------------------------------------------------------------------
1 | name = "diagnosis_viewer"
2 |
--------------------------------------------------------------------------------
/diagnosis-viewer/diagnosis_viewer/extension.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack.extensions.api import Extension, http
4 |
5 | LOG = logging.getLogger(__name__)
6 |
7 |
8 | class DiagnosisViewerExtension(Extension):
9 | name = "diagnosis-viewer"
10 |
11 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
12 | from diapretty.server.api import DiagnoseServer
13 | api = DiagnoseServer()
14 | router.add("/diapretty", api.serve)
15 |
--------------------------------------------------------------------------------
/diagnosis-viewer/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/diagnosis-viewer/logo.png
--------------------------------------------------------------------------------
/diagnosis-viewer/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-diagnosis-viewer
3 | version = 0.1.0
4 | url = https://github.com/localstack/localstack-extensions/tree/main/diagnosis-viewer
5 | summary: LocalStack Extension: Diagnosis Viewer
6 | author = LocalStack Contributors
7 | author_email = info@localstack.cloud
8 | description = View the diagnostics endpoint directly in localstack
9 | long_description = file: README.md
10 | long_description_content_type = text/markdown; charset=UTF-8
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 | install_requires =
16 | diapretty
17 |
18 | [options.extras_require]
19 | dev =
20 | localstack-core>=1.4
21 |
22 | [options.entry_points]
23 | localstack.extensions =
24 | diagnosis-viewer = diagnosis_viewer.extension:DiagnosisViewerExtension
25 |
--------------------------------------------------------------------------------
/diagnosis-viewer/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/hello-world/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | $(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | lint: ## Run code linter to check code style
21 | ($(VENV_RUN); python -m pflake8 --show-source)
22 |
23 | format: ## Run black and isort code formatter
24 | $(VENV_RUN); python -m isort helloworld; python -m black helloworld
25 |
26 | install: venv
27 | $(VENV_RUN); python -m pip install -e .[dev]
28 |
29 | dist: venv
30 | $(VENV_RUN); python setup.py sdist bdist_wheel
31 |
32 | publish: clean-dist venv dist
33 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
34 |
35 | clean-dist: clean
36 | rm -rf dist/
37 |
38 | .PHONY: clean clean-dist dist install publish
39 |
--------------------------------------------------------------------------------
/hello-world/README.md:
--------------------------------------------------------------------------------
1 | Hello World LocalStack extension
2 | ================================
3 | [Install LocalStack Extension](https://app.localstack.cloud/extensions/remote?url=git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-hello-world&subdirectory=hello-world)
4 |
5 | A minimal LocalStack extension.
6 |
7 | ## What does it do?
8 |
9 | It just prints a message to stdout once LocalStack starts, and a second time once the platform is ready to serve requests.
10 |
11 | ## Installing
12 |
13 | ```bash
14 | localstack extensions install localstack-extension-hello-world
15 | ```
16 |
--------------------------------------------------------------------------------
/hello-world/helloworld/__init__.py:
--------------------------------------------------------------------------------
1 | name = "helloworld"
2 |
--------------------------------------------------------------------------------
/hello-world/helloworld/extension.py:
--------------------------------------------------------------------------------
1 | from localstack.extensions.api import Extension
2 |
3 |
4 | class HelloWorldExtension(Extension):
5 | name = "hello-world"
6 |
7 | def on_platform_start(self):
8 | print("hello world: localstack is starting!")
9 |
10 | def on_platform_ready(self):
11 | print("hello world: localstack is running!")
12 |
--------------------------------------------------------------------------------
/hello-world/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/hello-world/logo.png
--------------------------------------------------------------------------------
/hello-world/pyproject.toml:
--------------------------------------------------------------------------------
1 | # LocalStack project configuration
2 | [build-system]
3 | requires = ['setuptools', 'wheel', 'plux>=1.3.1']
4 | build-backend = "setuptools.build_meta"
5 |
6 | [tool.black]
7 | line_length = 100
8 | include = '(helloworld/.*\.py$)'
9 |
10 | [tool.isort]
11 | profile = 'black'
12 | line_length = 100
13 |
14 | # call using pflake8
15 | [tool.flake8]
16 | max-line-length = 110
17 | ignore = 'E203,E266,E501,W503,F403'
18 | select = 'B,C,E,F,I,W,T4,B9'
19 | exclude = '.venv*,venv*,dist,*.egg-info,.git'
20 |
--------------------------------------------------------------------------------
/hello-world/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-hello-world
3 | version = 0.1.0
4 | summary = LocalStack Extension: Hello World
5 | description = A minimal LocalStack extension
6 | long_description = file: README.md
7 | long_description_content_type = text/markdown
8 | url = https://github.com/localstack/localstack-extensions/tree/main/hello-world
9 | author = Thomas Rausch
10 | author_email = thomas@localstack.cloud
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 |
16 | [options.extras_require]
17 | dev =
18 | localstack-core>=1.0
19 |
20 | [options.entry_points]
21 | localstack.extensions =
22 | hello-world = helloworld.extension:HelloWorldExtension
23 |
--------------------------------------------------------------------------------
/hello-world/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/httpbin/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | $(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | lint: venv
21 | $(VENV_RUN); python -m pflake8 --show-source
22 |
23 | format: venv
24 | $(VENV_RUN); python -m isort .; python -m black .
25 |
26 | install: venv
27 | $(VENV_RUN); python -m pip install -e .[dev]
28 |
29 | dist: venv
30 | $(VENV_RUN); python setup.py sdist bdist_wheel
31 |
32 | publish: clean-dist venv dist
33 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
34 |
35 | clean-dist: clean
36 | rm -rf dist/
37 |
38 | .PHONY: clean clean-dist dist install publish
39 |
--------------------------------------------------------------------------------
/httpbin/README.md:
--------------------------------------------------------------------------------
1 | LocalStack httpbin extension
2 | ===============================
3 | [Install LocalStack Extension](https://app.localstack.cloud/extensions/remote?url=git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-httpbin&subdirectory=httpbin)
4 |
5 | A simple HTTP Request & Response Service directly in LocalStack
6 | using [httpbin](https://github.com/postmanlabs/httpbin).
7 | Get the full httpbin experience directly in LocalStack without connecting to httpbin.org!
8 |
9 | The httpbin API is served through the hostname `http://httpbin.localhost.localstack.cloud:4566`.
10 |
11 | ## Install
12 |
13 | Install the extension by running:
14 |
15 | ```bash
16 | localstack extensions install localstack-extension-httpbin
17 | ```
18 |
19 | ## Usage
20 |
21 | Opening http://httpbin.localhost.localstack.cloud:4566 in the browser will show you the flasgger UI:
22 | 
23 |
24 | And you can call the API endpoints just as you would httpbin.org.
25 | 
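
For example, a minimal Python sketch calling the standard httpbin `/get` endpoint through LocalStack (standard library only; assumes LocalStack is running with the extension installed):

```python
import json
import urllib.request

url = "http://httpbin.localhost.localstack.cloud:4566/get?foo=bar"
with urllib.request.urlopen(url) as resp:
    body = json.load(resp)

# httpbin echoes the query arguments back in the response
print(body["args"])
```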
26 |
27 | ## Development
28 |
29 | ### Install local development version
30 |
31 | To install the extension into localstack in developer mode, you will need Python 3.10 and a virtual
32 | environment created in the extension project.
33 |
34 | In the extension project, simply run
35 |
36 | ```bash
37 | make install
38 | ```
39 |
40 | Then, to enable the extension for LocalStack, run
41 |
42 | ```bash
43 | localstack extensions dev enable .
44 | ```
45 |
46 | You can then start LocalStack with `EXTENSION_DEV_MODE=1` to load all enabled extensions:
47 |
48 | ```bash
49 | EXTENSION_DEV_MODE=1 localstack start
50 | ```
51 |
52 | ## Licensing
53 |
54 | * httpbin is licensed under the ISC license: https://github.com/postmanlabs/httpbin/blob/master/LICENSE
55 | * The httpbin source code is vendored with this extension; slight modifications were made to make it
56 |   compatible with the latest Python and Werkzeug versions.
57 |   The modifications retain the ISC license.
58 | * The extension code is licensed under the Apache 2.0 License
59 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/__init__.py:
--------------------------------------------------------------------------------
1 | name = "localstack_httpbin"
2 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/extension.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from typing import Optional
3 |
4 | from localstack import config
5 | from localstack.config import get_edge_url
6 | from localstack.extensions.api import Extension, http
7 | from localstack.utils.net import get_free_tcp_port
8 | from localstack.utils.urls import localstack_host
9 |
10 | from localstack_httpbin.server import HttpbinServer
11 |
12 | LOG = logging.getLogger(__name__)
13 |
14 |
15 | class HttpbinExtension(Extension):
16 | name = "httpbin"
17 |
18 | hostname_prefix = "httpbin."
19 |
20 | server: Optional[HttpbinServer]
21 |
22 | def __init__(self):
23 | self.server = None
24 |
25 | def on_extension_load(self):
26 | level = logging.DEBUG if config.DEBUG else logging.INFO
27 | logging.getLogger("localstack_httpbin").setLevel(level=level)
28 | logging.getLogger("httpbin").setLevel(level=level)
29 |
30 | def on_platform_start(self):
31 | from localstack_httpbin.vendor.httpbin import core
32 | core.template['host'] = f"{self.get_public_hostname()}:{localstack_host().port}"
33 | self.server = HttpbinServer(get_free_tcp_port())
34 | LOG.debug("starting httpbin on %s", self.server.url)
35 | self.server.start()
36 |
37 | def get_public_hostname(self) -> str:
38 | return f"{self.hostname_prefix}{localstack_host().host}"
39 |
40 | def on_platform_ready(self):
41 | LOG.info("Serving httpbin on %s", get_edge_url(localstack_hostname=self.get_public_hostname()))
42 |
43 | def on_platform_shutdown(self):
44 | if self.server:
45 | self.server.shutdown()
46 |
47 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
48 | endpoint = http.ProxyHandler(forward_base_url=self.server.url)
49 |
50 |         router.add("/", host=f"{self.hostname_prefix}<regex('.*'):host>", endpoint=endpoint)
51 |         router.add("/<path:path>", host=f"{self.hostname_prefix}<regex('.*'):host>", endpoint=endpoint)
52 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/server.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack.utils.run import ShellCommandThread
4 | from localstack.utils.serving import Server
5 | from localstack.utils.threads import TMP_THREADS
6 |
7 |
8 | class HttpbinServer(Server):
9 | logger = logging.getLogger("httpbin")
10 |
11 | def do_start_thread(self):
12 | thread = ShellCommandThread(
13 | [
14 | "/opt/code/localstack/.venv/bin/python",
15 | "-m",
16 | "localstack_httpbin.vendor.httpbin.core",
17 | "--port",
18 | str(self.port),
19 | ],
20 | log_listener=self._log_listener,
21 | )
22 | TMP_THREADS.append(thread)
23 | thread.start()
24 | return thread
25 |
26 | def _log_listener(self, line, **_kwargs):
27 | self.logger.debug(line.rstrip())
28 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/__init__.py:
--------------------------------------------------------------------------------
1 | """Vendored libraries"""
2 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from .core import *
4 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/filters.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | httpbin.filters
5 | ~~~~~~~~~~~~~~~
6 |
7 | This module provides response filter decorators.
8 | """
9 |
10 | import gzip as gzip2
11 | import zlib
12 |
13 | from six import BytesIO
14 | from decimal import Decimal
15 | from time import time as now
16 |
17 | from decorator import decorator
18 | from flask import Flask, Response
19 |
20 |
21 | app = Flask(__name__)
22 |
23 |
24 | @decorator
25 | def x_runtime(f, *args, **kwargs):
26 | """X-Runtime Flask Response Decorator."""
27 |
28 | _t0 = now()
29 | r = f(*args, **kwargs)
30 | _t1 = now()
31 | r.headers['X-Runtime'] = '{0}s'.format(Decimal(str(_t1 - _t0)))
32 |
33 | return r
34 |
35 |
36 | @decorator
37 | def gzip(f, *args, **kwargs):
38 | """GZip Flask Response Decorator."""
39 |
40 | data = f(*args, **kwargs)
41 |
42 | if isinstance(data, Response):
43 | content = data.data
44 | else:
45 | content = data
46 |
47 | gzip_buffer = BytesIO()
48 | gzip_file = gzip2.GzipFile(
49 | mode='wb',
50 | compresslevel=4,
51 | fileobj=gzip_buffer
52 | )
53 | gzip_file.write(content)
54 | gzip_file.close()
55 |
56 | gzip_data = gzip_buffer.getvalue()
57 |
58 | if isinstance(data, Response):
59 | data.data = gzip_data
60 | data.headers['Content-Encoding'] = 'gzip'
61 | data.headers['Content-Length'] = str(len(data.data))
62 |
63 | return data
64 |
65 | return gzip_data
66 |
67 |
68 | @decorator
69 | def deflate(f, *args, **kwargs):
70 | """Deflate Flask Response Decorator."""
71 |
72 | data = f(*args, **kwargs)
73 |
74 | if isinstance(data, Response):
75 | content = data.data
76 | else:
77 | content = data
78 |
79 | deflater = zlib.compressobj()
80 | deflated_data = deflater.compress(content)
81 | deflated_data += deflater.flush()
82 |
83 | if isinstance(data, Response):
84 | data.data = deflated_data
85 | data.headers['Content-Encoding'] = 'deflate'
86 | data.headers['Content-Length'] = str(len(data.data))
87 |
88 | return data
89 |
90 | return deflated_data
91 |
92 |
93 | @decorator
94 | def brotli(f, *args, **kwargs):
95 | """Brotli Flask Response Decorator"""
96 | import brotli as _brotli
97 |
98 | data = f(*args, **kwargs)
99 |
100 | if isinstance(data, Response):
101 | content = data.data
102 | else:
103 | content = data
104 |
105 | deflated_data = _brotli.compress(content)
106 |
107 | if isinstance(data, Response):
108 | data.data = deflated_data
109 | data.headers['Content-Encoding'] = 'br'
110 | data.headers['Content-Length'] = str(len(data.data))
111 |
112 | return data
113 |
114 | return deflated_data
115 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/httpbin/localstack_httpbin/vendor/httpbin/static/favicon.ico
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/structures.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | httpbin.structures
5 | ~~~~~~~~~~~~~~~~~~~
6 |
7 | Data structures that power httpbin.
8 | """
9 |
10 |
11 | class CaseInsensitiveDict(dict):
12 | """Case-insensitive Dictionary for headers.
13 |
14 | For example, ``headers['content-encoding']`` will return the
15 | value of a ``'Content-Encoding'`` response header.
16 | """
17 |
18 | def _lower_keys(self):
19 | return [k.lower() for k in self.keys()]
20 |
21 | def __contains__(self, key):
22 | return key.lower() in self._lower_keys()
23 |
24 | def __getitem__(self, key):
25 | # We allow fall-through here, so values default to None
26 | if key in self:
27 | return list(self.items())[self._lower_keys().index(key.lower())][1]
28 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/templates/footer.html:
--------------------------------------------------------------------------------
1 |
10 | Availing himself of the mild, summer-cool weather that now reigned in these latitudes, and in preparation for the peculiarly active pursuits shortly to be anticipated, Perth, the begrimed, blistered old blacksmith, had not removed his portable forge to the hold again, after concluding his contributory work for Ahab's leg, but still retained it on deck, fast lashed to ringbolts by the foremast; being now almost incessantly invoked by the headsmen, and harpooneers, and bowsmen to do some little job for them; altering, or repairing, or new shaping their various weapons and boat furniture. Often he would be surrounded by an eager circle, all waiting to be served; holding boat-spades, pike-heads, harpoons, and lances, and jealously watching his every sooty movement, as he toiled. Nevertheless, this old man's was a patient hammer wielded by a patient arm. No murmur, no impatience, no petulance did come from him. Silent, slow, and solemn; bowing over still further his chronically broken back, he toiled away, as if toil were life itself, and the heavy beating of his hammer the heavy beating of his heart. And so it was.—Most miserable! A peculiar walk in this old man, a certain slight but painful appearing yawing in his gait, had at an early period of the voyage excited the curiosity of the mariners. And to the importunity of their persisted questionings he had finally given in; and so it came to pass that every one now knew the shameful story of his wretched fate. Belated, and not innocently, one bitter winter's midnight, on the road running between two country towns, the blacksmith half-stupidly felt the deadly numbness stealing over him, and sought refuge in a leaning, dilapidated barn. The issue was, the loss of the extremities of both feet. Out of this revelation, part by part, at last came out the four acts of the gladness, and the one long, and as yet uncatastrophied fifth act of the grief of his life's drama. He was an old man, who, at the age of nearly sixty, had postponedly encountered that thing in sorrow's technicals called ruin. He had been an artisan of famed excellence, and with plenty to do; owned a house and garden; embraced a youthful, daughter-like, loving wife, and three blithe, ruddy children; every Sunday went to a cheerful-looking church, planted in a grove. But one night, under cover of darkness, and further concealed in a most cunning disguisement, a desperate burglar slid into his happy home, and robbed them all of everything. And darker yet to tell, the blacksmith himself did ignorantly conduct this burglar into his family's heart. It was the Bottle Conjuror! Upon the opening of that fatal cork, forth flew the fiend, and shrivelled up his home. Now, for prudent, most wise, and economic reasons, the blacksmith's shop was in the basement of his dwelling, but with a separate entrance to it; so that always had the young and loving healthy wife listened with no unhappy nervousness, but with vigorous pleasure, to the stout ringing of her young-armed old husband's hammer; whose reverberations, muffled by passing through the floors and walls, came up to her, not unsweetly, in her nursery; and so, to stout Labor's iron lullaby, the blacksmith's infants were rocked to slumber. Oh, woe on woe! Oh, Death, why canst thou not sometimes be timely? 
Hadst thou taken this old blacksmith to thyself ere his full ruin came upon him, then had the young widow had a delicious grief, and her orphans a truly venerable, legendary sire to dream of in their after years; and all of them a care-killing competency.
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/templates/sample.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
10 |
11 |
12 |
13 | Wake up to WonderWidgets!
14 |
15 |
16 |
17 |
18 | Overview
19 | Why WonderWidgets are great
20 |
21 | Who buys WonderWidgets
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/templates/trackingscripts.html:
--------------------------------------------------------------------------------
1 | {#
2 | place tracking scripts (like Google Analytics) here
3 | #}
4 |
5 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | httpbin.utils
5 | ~~~~~~~~~~~~~~~
6 |
7 | Utility functions.
8 | """
9 |
10 | import random
11 | import bisect
12 |
13 |
14 | def weighted_choice(choices):
15 | """Returns a value from choices chosen by weighted random selection
16 |
17 | choices should be a list of (value, weight) tuples.
18 |
19 | eg. weighted_choice([('val1', 5), ('val2', 0.3), ('val3', 1)])
20 |
21 | """
22 | values, weights = zip(*choices)
23 | total = 0
24 | cum_weights = []
25 | for w in weights:
26 | total += w
27 | cum_weights.append(total)
28 | x = random.uniform(0, total)
29 | i = bisect.bisect(cum_weights, x)
30 | return values[i]
31 |
--------------------------------------------------------------------------------
/httpbin/localstack_httpbin/vendor/httpbin/version.py:
--------------------------------------------------------------------------------
1 | version = "0.9.2.post1"
--------------------------------------------------------------------------------
/httpbin/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/httpbin/logo.png
--------------------------------------------------------------------------------
/httpbin/pyproject.toml:
--------------------------------------------------------------------------------
1 | # LocalStack project configuration
2 | [build-system]
3 | requires = ['setuptools', 'wheel', 'plux>=1.3.3']
4 | build-backend = "setuptools.build_meta"
5 |
6 | [tool.black]
7 | line_length = 100
8 | include = '(localstack_httpbin/.*\.py$)'
9 | extend_exclude = '(localstack_httpbin/vendor)'
10 |
11 | [tool.isort]
12 | profile = 'black'
13 | line_length = 100
14 | extend_skip = ['localstack_httpbin/vendor/']
15 |
16 | # call using pflake8
17 | [tool.flake8]
18 | max-line-length = 110
19 | ignore = 'E203,E266,E501,W503,F403'
20 | select = 'B,C,E,F,I,W,T4,B9'
21 | exclude = '.venv*,venv*,dist,*.egg-info,.git'
22 |
--------------------------------------------------------------------------------
/httpbin/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-httpbin
3 | version = 0.2.0
4 | url = https://github.com/localstack/localstack-extensions/tree/main/httpbin
5 | author = LocalStack
6 | author_email = info@localstack.cloud
7 | summary = LocalStack Extension: httpbin
8 | description = A simple HTTP Request & Response Service directly in LocalStack
9 | long_description = file: README.md
10 | long_description_content_type = text/markdown; charset=UTF-8
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 | install_requires =
16 | # requirements for vendored httpbin
17 | Flask
18 | MarkupSafe
19 | decorator
20 | itsdangerous
21 | brotlipy
22 | raven[flask]
23 | gevent
24 | flasgger
25 |
26 | [options.extras_require]
27 | dev =
28 | localstack-core>=2.2
29 |
30 | [options.entry_points]
31 | localstack.extensions =
32 | httpbin = localstack_httpbin.extension:HttpbinExtension
33 |
34 | [options.package_data]
35 | localstack_httpbin =
36 | vendor/httpbin/static/*.*
37 | vendor/httpbin/templates/*.*
38 | vendor/httpbin/templates/flasgger/*.*
39 | vendor/httpbin/templates/images/*.*
40 |
--------------------------------------------------------------------------------
/httpbin/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/mailhog/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | $(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | lint: ## Run code linter to check code style
21 | ($(VENV_RUN); python -m pflake8 --show-source)
22 |
23 | format: ## Run black and isort code formatter
24 | $(VENV_RUN); python -m isort mailhog; python -m black mailhog
25 |
26 | install: venv
27 | $(VENV_RUN); python -m pip install -e .[dev]
28 |
29 | dist: venv
30 | $(VENV_RUN); python setup.py sdist bdist_wheel
31 |
32 | publish: clean-dist venv dist
33 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
34 |
35 | clean-dist: clean
36 | rm -rf dist/
37 |
38 | .PHONY: clean clean-dist dist install publish
39 |
--------------------------------------------------------------------------------
/mailhog/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/mailhog/logo.png
--------------------------------------------------------------------------------
/mailhog/mailhog/__init__.py:
--------------------------------------------------------------------------------
1 | name = "mailhog"
2 |
--------------------------------------------------------------------------------
/mailhog/mailhog/extension.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from typing import TYPE_CHECKING, Optional
4 |
5 | from localstack import config, constants
6 | from localstack.extensions.api import Extension, http
7 | from werkzeug.utils import append_slash_redirect
8 |
9 | try:
10 | from localstack.pro.core import config as config_pro
11 | except ImportError:
12 | # TODO remove once we don't need compatibility with <3.6 anymore
13 | from localstack_ext import config as config_pro
14 |
15 | if TYPE_CHECKING:
16 | # conditional import for type checking during development. the actual import is deferred to plugin loading
17 | # to help with startup times
18 | from mailhog.server import MailHogServer
19 |
20 | LOG = logging.getLogger(__name__)
21 |
22 |
23 | class MailHogExtension(Extension):
24 | """
25 | MailHog extension. Uses environment-based configuration as described here:
26 | https://github.com/mailhog/MailHog/blob/master/docs/CONFIG.md.
27 |
28 | It exposes three services:
29 | * The mailhog API
30 | * The mailhog UI
31 | * The mailhog SMTP server
32 |
33 | The first two are served through a random port but then routed through the gateway and accessible through
34 |     http://mailhog.localhost.localstack.cloud:4566, or http://localhost:4566/_extension/mailhog/ (note the trailing
35 | slash).
36 |
37 | The mailhog SMTP server is configured automatically as ``SMTP_HOST``, so when you use SES, mails get
38 | automatically delivered to mailhog. Neato burrito.
39 | """
40 |
41 | name = "mailhog"
42 |
43 | hostname_prefix = "mailhog."
44 | """Used for serving through a host rule."""
45 |
46 | server: Optional["MailHogServer"]
47 |
48 | def __init__(self):
49 | self.server = None
50 |
51 | def on_extension_load(self):
52 | # TODO: logging should be configured automatically for extensions
53 | if config.DEBUG:
54 | level = logging.DEBUG
55 | else:
56 | level = logging.INFO
57 | logging.getLogger("mailhog").setLevel(level=level)
58 |
59 | def on_platform_start(self):
60 | from mailhog.server import MailHogServer
61 |
62 | self.server = MailHogServer()
63 | LOG.info("starting mailhog server")
64 | self.server.start()
65 |
66 | if not config_pro.SMTP_HOST:
67 | config_pro.SMTP_HOST = f"localhost:{self.server.smtp_port}"
68 | os.environ["SMTP_HOST"] = config_pro.SMTP_HOST
69 | LOG.info("configuring SMTP host to internal mailhog smtp: %s", config_pro.SMTP_HOST)
70 |
71 | def on_platform_ready(self):
72 | # FIXME: reconcile with LOCALSTACK_HOST. the URL should be reachable from the host (the idea is
73 | # that users get a log message they can click on from the terminal)
74 | hostname_edge_url = f"{constants.LOCALHOST_HOSTNAME}:{config.get_edge_port_http()}"
75 | url = f"http://{self.hostname_prefix}{hostname_edge_url}"
76 | LOG.info("serving mailhog extension on host: %s", url)
77 |
78 | # trailing slash is important (see update_gateway_routes comment)
79 | url = f"{config.get_edge_url()}/{self.server.web_path}/"
80 | LOG.info("serving mailhog extension on path: %s", url)
81 |
82 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
83 | endpoint = http.ProxyHandler(forward_base_url=self.server.url + "/" + self.server.web_path)
84 |
85 | def _redirect_endpoint(request, *args, **kwargs):
86 | if not request.path.endswith("/"):
87 | return append_slash_redirect(request.environ)
88 | return endpoint(request, *args, **kwargs)
89 |
90 | # hostname aliases
91 |         router.add(
92 |             "/",
93 |             host=f"{self.hostname_prefix}<regex('.*'):host>",
94 |             endpoint=endpoint,
95 |         )
96 |         router.add(
97 |             "/<path:path>",
98 |             host=f"{self.hostname_prefix}<regex('.*'):host>",
99 |             endpoint=endpoint,
100 |         )
101 |
102 | # serve through the web path. here the werkzeug default functionality of strict slashes would be
103 |         # useful, since the webapp needs to be accessed with a trailing slash (localhost:4566/<web_path>/)
104 |         # otherwise the relative urls (like `images/logo.png`) are resolved as
105 |         # `localhost:4566/images/logo.png` which looks like an S3 access and will lead to localstack errors.
106 | # alas, we disabled this for good reason, so we need to catch the request and redirect it if needed
107 | router.add(
108 | f"/{self.server.web_path}",
109 | endpoint=_redirect_endpoint,
110 | )
111 | router.add(
112 | f"/{self.server.web_path}/",
113 | endpoint=endpoint,
114 | )
115 |
116 | def on_platform_shutdown(self):
117 | if self.server:
118 | self.server.shutdown()
119 |
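# Hedged illustration (not part of the extension code above): once SMTP_HOST points at
# the mailhog SMTP server, an SES email sent against LocalStack should show up in the
# mailhog UI. The endpoint URL, region, dummy credentials, and addresses below are
# illustrative assumptions.
import boto3

ses = boto3.client(
    "ses",
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",  # dummy credentials accepted by LocalStack
    aws_secret_access_key="test",
)
ses.verify_email_identity(EmailAddress="sender@example.com")
ses.send_email(
    Source="sender@example.com",
    Destination={"ToAddresses": ["recipient@example.com"]},
    Message={
        "Subject": {"Data": "hello from LocalStack"},
        "Body": {"Text": {"Data": "delivered to mailhog"}},
    },
)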
--------------------------------------------------------------------------------
/mailhog/mailhog/package.py:
--------------------------------------------------------------------------------
1 | """
2 | Package for mailhog that downloads the mailhog binary from https://github.com/mailhog/MailHog.
3 | """
4 |
5 | import os
6 | from functools import lru_cache
7 |
8 | from localstack.packages import GitHubReleaseInstaller, Package, PackageInstaller
9 | from localstack.utils.platform import Arch, get_arch, get_os
10 |
11 | _MAILHOG_VERSION = os.environ.get("MH_VERSION") or "v1.0.1"
12 |
13 |
14 | class MailHogPackage(Package):
15 | def __init__(self, default_version: str = _MAILHOG_VERSION):
16 | super().__init__(name="MailHog", default_version=default_version)
17 |
18 | @lru_cache
19 | def _get_installer(self, version: str) -> PackageInstaller:
20 | return MailHogPackageInstaller(version)
21 |
22 | def get_versions(self) -> list[str]:
23 | return [_MAILHOG_VERSION]
24 |
25 |
26 | class MailHogPackageInstaller(GitHubReleaseInstaller):
27 | def __init__(self, version: str):
28 | super().__init__("mailhog", version, "mailhog/MailHog")
29 |
30 | def _get_github_asset_name(self):
31 | arch = get_arch()
32 | operating_system = get_os()
33 |
34 | if arch == Arch.amd64:
35 | bin_file = f"MailHog_{operating_system}_amd64"
36 | elif arch == Arch.arm64:
37 | bin_file = f"MailHog_{operating_system}_arm"
38 | else:
39 | raise NotImplementedError(f"unknown architecture {arch}")
40 |
41 | # the extension would typically only be used in the container, so windows support is not needed,
42 | # but since there are windows binaries might as well add them
43 | if operating_system == "windows":
44 | bin_file += ".exe"
45 |
46 | return bin_file
47 |
48 |
49 | mailhog_package = MailHogPackage()
50 |
--------------------------------------------------------------------------------
/mailhog/mailhog/server.py:
--------------------------------------------------------------------------------
1 | """
2 | Tools to run the mailhog service.
3 | """
4 |
5 | import logging
6 | import os
7 |
8 | from localstack import config
9 | from localstack.utils.net import get_free_tcp_port
10 | from localstack.utils.run import ShellCommandThread
11 | from localstack.utils.serving import Server
12 | from localstack.utils.threads import TMP_THREADS
13 |
14 | from mailhog.package import mailhog_package
15 |
16 | LOG = logging.getLogger(__name__)
17 |
18 |
19 | class MailHogServer(Server):
20 | """
21 | Mailhog server abstraction. Uses environment-based configuration as described here:
22 | https://github.com/mailhog/MailHog/blob/master/docs/CONFIG.md.
23 |
24 | It exposes three services:
25 | * The mailhog API (random port)
26 | * The mailhog UI (same port as API)
27 | * The mailhog SMTP server (25)
28 |
29 | It supports snapshot persistence by pointing the MH_MAILDIR_PATH to the asset directory.
30 | """
31 |
32 | default_web_path = "_extension/mailhog"
33 | """WebPath under which the UI is served (without leading or trailing slashes)"""
34 |
35 | default_smtp_port = 25
36 | """Default port used to expose the SMTP server, unless MH_SMTP_BIND_ADDR is set."""
37 |
38 | def __init__(self, host: str = "0.0.0.0") -> None:
39 | super().__init__(self._get_configured_or_random_api_port(), host)
40 |
41 | def do_start_thread(self):
42 | mailhog_package.install()
43 |
44 | cmd = self._create_command()
45 | env = self._create_env_vars()
46 |
47 | LOG.debug("starting mailhog thread: %s, %s", cmd, env)
48 |
49 | t = ShellCommandThread(
50 | cmd,
51 | env_vars=env,
52 | name="mailhog",
53 | log_listener=self._log_listener,
54 | )
55 | TMP_THREADS.append(t)
56 | t.start()
57 | return t
58 |
59 | def _log_listener(self, line, **_kwargs):
60 | LOG.debug(line.rstrip())
61 |
62 | @property
63 | def ui_port(self) -> int:
64 | if addr := os.getenv("MH_UI_BIND_ADDR"):
65 | return int(addr.split(":")[-1])
66 | return self.port
67 |
68 | @property
69 | def smtp_port(self) -> int:
70 | if addr := os.getenv("MH_SMTP_BIND_ADDR"):
71 | return int(addr.split(":")[-1])
72 |
73 | return self.default_smtp_port
74 |
75 | @property
76 | def web_path(self):
77 | """Returns the configured path under which the web UI will be available when using path-based
78 | routing. This should be without trailing or prefixed slashes. by default, it results in
79 | http://localhost:4566/_extension/mailhog."""
80 | return os.getenv("MH_UI_WEB_PATH") or self.default_web_path
81 |
82 | def _create_env_vars(self) -> dict:
83 | """All configuration of mailhog"""
84 | # pre-populate the relevant variables
85 | env = {k: v for k, v in os.environ.items() if k.startswith("MH_")}
86 |
87 | # web path is needed to not conflict with the default router
88 | env["MH_UI_WEB_PATH"] = self.web_path
89 |
90 | # configure persistence unless the user overwrites it
91 | if config.PERSISTENCE and not os.getenv("MH_STORAGE"):
92 | env["MH_STORAGE"] = "maildir"
93 | # pointing it to the asset directory will make persistence work out of the box
94 | env["MH_MAILDIR_PATH"] = env.get(
95 | "MH_MAILDIR_PATH", os.path.join(config.dirs.data, "mailhog")
96 | )
97 |
98 | if not os.getenv("MH_API_BIND_ADDR"):
99 | env["MH_API_BIND_ADDR"] = f"{self.host}:{self.port}"
100 |
101 | if not os.getenv("MH_UI_BIND_ADDR"):
102 | env["MH_UI_BIND_ADDR"] = f"{self.host}:{self.ui_port}"
103 |
104 | if not os.getenv("MH_SMTP_BIND_ADDR"):
105 | env["MH_SMTP_BIND_ADDR"] = f"{self.host}:{self.smtp_port}"
106 |
107 | if not os.getenv("MH_HOSTNAME"):
108 | # TODO: reconcile with LOCALSTACK_HOST (although this may only be cosmetics for the EHLO command)
109 | env["MH_HOSTNAME"] = "mailhog.localhost.localstack.cloud"
110 |
111 | return env
112 |
113 | def _create_command(self) -> list[str]:
114 | cmd = [mailhog_package.get_installer().get_executable_path()]
115 | return cmd
116 |
117 | @staticmethod
118 | def _get_configured_or_random_api_port() -> int:
119 | if addr := os.getenv("MH_API_BIND_ADDR"):
120 | return int(addr.split(":")[-1])
121 |
122 | return get_free_tcp_port()
123 |
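As a quick illustration of the bind-address convention used by the `ui_port`/`smtp_port` properties above: the port is simply the last colon-separated token of the corresponding `MH_*_BIND_ADDR` variable, with a default used when the variable is unset. A minimal standalone sketch (not extension code):

```python
import os

# Hypothetical helper replicating the port-resolution pattern used above:
# take the port from the bind address if set, otherwise fall back to a default.
def resolve_port(env_var: str, default: int) -> int:
    if addr := os.getenv(env_var):
        return int(addr.split(":")[-1])
    return default

os.environ["MH_SMTP_BIND_ADDR"] = "0.0.0.0:1025"
print(resolve_port("MH_SMTP_BIND_ADDR", 25))   # -> 1025 (overridden via env var)
print(resolve_port("MH_UI_BIND_ADDR", 8025))   # -> 8025 (unset, default used)
```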
--------------------------------------------------------------------------------
/mailhog/pyproject.toml:
--------------------------------------------------------------------------------
1 | # LocalStack project configuration
2 | [build-system]
3 | requires = ['setuptools', 'wheel', 'plux>=1.3.1']
4 | build-backend = "setuptools.build_meta"
5 |
6 | [tool.black]
7 | line_length = 100
8 | include = '(mailhog/.*\.py$)'
9 |
10 | [tool.isort]
11 | profile = 'black'
12 | line_length = 100
13 |
14 | # call using pflake8
15 | [tool.flake8]
16 | max-line-length = 110
17 | ignore = 'E203,E266,E501,W503,F403'
18 | select = 'B,C,E,F,I,W,T4,B9'
19 | exclude = '.venv*,venv*,dist,*.egg-info,.git'
20 |
--------------------------------------------------------------------------------
/mailhog/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-mailhog
3 | version = 0.2.0
4 | url = https://github.com/localstack/localstack-extensions/tree/main/mailhog
5 | author = LocalStack
6 | author_email = info@localstack.cloud
7 | summary = LocalStack Extension: MailHog
8 | description = Web and API based SMTP testing directly in LocalStack using MailHog
9 | long_description = file: README.md
10 | long_description_content_type = text/markdown; charset=UTF-8
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 |
16 | [options.extras_require]
17 | dev =
18 | localstack-core>=2.2
19 |
20 | [options.entry_points]
21 | localstack.extensions =
22 | mailhog = mailhog.extension:MailHogExtension
23 |
--------------------------------------------------------------------------------
/mailhog/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/miniflare/LICENSE:
--------------------------------------------------------------------------------
1 | cloudflare/miniflare is licensed under the MIT License
2 |
3 | See https://github.com/cloudflare/miniflare/blob/master/LICENSE:
4 |
5 | ---
6 | Copyright (c) 2021 MrBBot
7 |
8 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
11 |
12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
13 |
--------------------------------------------------------------------------------
/miniflare/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | $(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | lint: ## Run code linter to check code style
21 | $(VENV_RUN); python -m pflake8 --show-source --ignore=E501 --exclude .venv,build
22 |
23 | format: ## Run black and isort code formatter
24 | $(VENV_RUN); python -m isort .; python -m black miniflare
25 |
26 | install: venv
27 | $(VENV_RUN); python -m pip install -e .[dev]
28 |
29 | dist: venv
30 | $(VENV_RUN); python setup.py sdist bdist_wheel
31 |
32 | publish: clean-dist venv dist
33 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
34 |
35 | clean-dist: clean
36 | rm -rf dist/
37 |
38 | .PHONY: clean clean-dist dist install publish
39 |
--------------------------------------------------------------------------------
/miniflare/README.md:
--------------------------------------------------------------------------------
1 | Miniflare LocalStack extension (experimental)
2 | =============================================
3 | [](https://app.localstack.cloud/extensions/remote?url=git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-miniflare&subdirectory=miniflare)
4 |
5 | This extension makes [Miniflare](https://miniflare.dev) (dev environment for Cloudflare workers) available directly in LocalStack!
6 |
7 | ⚠️ Please note that this extension is experimental and currently under active development.
8 |
9 | ## Installing
10 |
11 | ```bash
12 | localstack extensions install "git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-miniflare&subdirectory=miniflare"
13 | ```
14 |
15 | ## How to use
16 |
17 | To publish the sample application to Miniflare running in LocalStack, we can use the `wrangler` CLI with the following environment variables for local dev mode:
18 | ```
19 | export CLOUDFLARE_API_TOKEN=test
20 | export CLOUDFLARE_API_BASE_URL=http://localhost:4566/miniflare
21 | wrangler publish
22 | ```
23 |
24 | Note: if you're having trouble with this configuration, e.g., seeing "Fetch failed" error messages on `wrangler publish`, try using this API endpoint instead:
25 | ```
26 | export CLOUDFLARE_API_BASE_URL=https://localhost.localstack.cloud:4566/miniflare
27 | ```
28 |
29 | Once deployed, the Cloudflare worker can be easily invoked via `curl`:
30 | ```
31 | $ curl http://hello.miniflare.localhost.localstack.cloud:4566/test
32 | Hello World!
33 | ```
34 |
35 | ## Change Log
36 |
37 | * `0.1.2`: Pin wrangler version to fix hanging miniflare invocations; fix encoding headers for invocation responses
38 | * `0.1.1`: Adapt for LocalStack v3.0
39 | * `0.1.0`: Upgrade to Miniflare 3.0
40 | * `0.0.1`: Initial version.
41 |
42 | ## License
43 |
44 | The `cloudflare/miniflare` package and related tooling are licensed under the MIT License.
45 |
46 | The code of this LocalStack Extension is published under the Apache 2.0 license.
47 |
--------------------------------------------------------------------------------
/miniflare/example-aws/index.js:
--------------------------------------------------------------------------------
1 | // sample worker app, based on: https://github.com/cloudflare/workers-sdk/tree/main/templates/worker-aws
2 |
3 | import { DynamoDBClient, GetItemCommand, PutItemCommand } from '@aws-sdk/client-dynamodb';
4 | import { RDSDataClient, ExecuteStatementCommand } from '@aws-sdk/client-rds-data';
5 | import { SQSClient, SendMessageCommand } from '@aws-sdk/client-sqs';
6 |
7 | export default {
8 | async fetch(request) { return handleRequest(request); },
9 | };
10 |
11 | const myCredentialProvider = () => ({
12 | // use wrangler secrets to provide these global variables
13 | accessKeyId: AWS_ACCESS_KEY_ID,
14 | secretAccessKey: AWS_SECRET_ACCESS_KEY,
15 | });
16 |
17 | // define AWS SDK client config with LocalStack endpoint
18 | const endpoint = "http://localhost:4566";
19 | const clientConfig = {
20 | region: AWS_REGION,
21 | credentialDefaultProvider: myCredentialProvider,
22 | endpoint,
23 | };
24 |
25 | async function handleRequest() {
26 | // The AWS SDK tries to use crypto from off of the window,
27 | // so we need to trick it into finding it where it expects it
28 | global.window = {};
29 | window.crypto = crypto;
30 |
31 | // TODO: Try all the examples!
32 | // Uncomment the example you'd like to try:
33 | const result = await sqsExample();
34 | // const result = await dynamoExample();
35 | // const result = await auroraExample(request);
36 |
37 | return new Response(JSON.stringify(result), {
38 | headers: { 'content-type': 'text/plain' },
39 | });
40 | }
41 |
42 | async function sqsExample() {
43 | const client = new SQSClient(clientConfig);
44 |
45 | const send = new SendMessageCommand({
46 | // use wrangler secrets to provide this global variable
47 | QueueUrl: AWS_SQS_QUEUE_URL,
48 | MessageBody: 'Hello SQS from a Cloudflare Worker',
49 | });
50 |
51 | return client.send(send);
52 | }
53 |
54 | async function dynamoExample() {
55 | const client = new DynamoDBClient(clientConfig);
56 |
57 | // replace with your table name and key as appropriate
58 | const put = new PutItemCommand({
59 | TableName: AWS_DYNAMO_TABLE,
60 | Item: {
61 | greeting: { S: 'Hello!' },
62 | [AWS_DYNAMO_PRIMARYKEY]: { S: 'world' },
63 | },
64 | });
65 | await client.send(put);
66 | const get = new GetItemCommand({
67 | TableName: AWS_DYNAMO_TABLE,
68 | Key: {
69 | [AWS_DYNAMO_PRIMARYKEY]: { S: 'world' },
70 | },
71 | });
72 | const results = await client.send(get);
73 | return results.Item;
74 | }
75 |
76 | async function auroraExample(request) {
77 | if (request.method === 'POST') {
78 | const jsonData = await request.json();
79 | return await auroraPostData(jsonData);
80 | } else {
81 | // We need to create a URL object so we can read the query parameters from the request
82 | const url = new URL(request.url);
83 | const ID = url.searchParams.get('ID');
84 | return await auroraGetData(ID);
85 | }
86 | }
87 |
88 | async function auroraGetData(ID) {
89 | const client = new RDSDataClient(clientConfig);
90 |
91 | const call = new ExecuteStatementCommand({
92 | // IMPORTANT: This is NOT production ready!
93 | // This SQL command is susceptible to SQL Injections
94 | sql: `SELECT * FROM ${AWS_AURORA_TABLE} WHERE id = ${ID};`,
95 | resourceArn: AWS_AURORA_RESOURCE_ARN,
96 | secretArn: AWS_AURORA_SECRET_ARN,
97 | });
98 |
99 | const results = await client.send(call);
100 |
101 | return results.records;
102 | }
103 |
104 | async function auroraPostData(jsonData) {
105 | const client = new RDSDataClient(clientConfig);
106 |
107 | const keysArray = Object.keys(jsonData);
108 | let keys = '';
109 | let values = '';
110 |
111 | keysArray.forEach((key, index) => {
112 | keys += `${key}`;
113 | values += `'${jsonData[key]}'`;
114 |
115 | if (index !== keysArray.length - 1) {
116 | keys += ', ';
117 | values += ', ';
118 | }
119 | });
120 |
121 | const call = new ExecuteStatementCommand({
122 | // IMPORTANT: This is NOT production ready!
123 | // This SQL command is susceptible to SQL Injections
124 | sql: `INSERT INTO ${AWS_AURORA_TABLE}(${keys}) VALUES (${values});`,
125 | resourceArn: AWS_AURORA_RESOURCE_ARN,
126 | secretArn: AWS_AURORA_SECRET_ARN,
127 | });
128 |
129 | const results = await client.send(call);
130 |
131 | return results;
132 | }
133 |
--------------------------------------------------------------------------------
/miniflare/example-aws/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "template-worker-aws",
3 | "version": "0.0.0",
4 | "private": true,
5 | "type": "module",
6 | "scripts": {
7 | "build": "worktop build index.js",
8 | "deploy": "wrangler publish index.js",
9 | "dev": "wrangler dev index.js --local"
10 | },
11 | "dependencies": {
12 | "@aws-sdk/client-dynamodb": "^3.82.0",
13 | "@aws-sdk/client-rds": "^3.82.0",
14 | "@aws-sdk/client-rds-data": "^3.82.0",
15 | "@aws-sdk/client-sqs": "^3.82.0"
16 | },
17 | "devDependencies": {
18 | "@esbuild-plugins/node-modules-polyfill": "0.1.4",
19 | "worktop.build": "0.0.5",
20 | "wrangler": "^2.0.0"
21 | }
22 | }
--------------------------------------------------------------------------------
/miniflare/example-aws/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export AWS_DEFAULT_REGION="us-east-1"
4 | export CLOUDFLARE_API_TOKEN=test
5 | export CLOUDFLARE_API_BASE_URL=http://localhost:4566/miniflare
6 |
7 | # create resources in LocalStack
8 | # awslocal rds create-db-instance AWS_AURORA_TABLE ...
9 | queueUrl=$(awslocal sqs create-queue --queue-name q1 | jq -r .QueueUrl)
10 |
11 | # set wrangler secrets
12 | echo "test" | wrangler secret put AWS_AURORA_RESOURCE_ARN
13 | echo "test" | wrangler secret put AWS_AURORA_SECRET_ARN
14 | echo "test" | wrangler secret put AWS_ACCESS_KEY_ID
15 | echo "test" | wrangler secret put AWS_SECRET_ACCESS_KEY
16 | echo "$queueUrl" | wrangler secret put AWS_SQS_QUEUE_URL
17 |
18 | # publish worker script
19 | wrangler publish
20 |
21 | workerEndpoint=http://worker-aws.miniflare.localhost.localstack.cloud:4566/test
22 | echo "Deployment done. You can now invoke the worker via:"
23 | echo "curl $workerEndpoint"
24 |
25 | curl $workerEndpoint
26 | awslocal sqs receive-message --queue-url $queueUrl
27 |
--------------------------------------------------------------------------------
/miniflare/example-aws/wrangler.toml:
--------------------------------------------------------------------------------
1 | name = "worker-aws"
2 | main="index.js"
3 | compatibility_date = "2022-05-03"
4 |
5 | [build]
6 | command = "npm run build"
7 |
8 | [vars]
9 | AWS_REGION = "us-east-1"
10 | AWS_DYNAMO_TABLE = "test_table_name"
11 | AWS_DYNAMO_PRIMARYKEY = "test_primary_key"
12 | AWS_AURORA_TABLE = "demo.friends"
13 |
--------------------------------------------------------------------------------
/miniflare/example/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "hello",
3 | "version": "0.0.0",
4 | "devDependencies": {
5 | "@cloudflare/workers-types": "^4.20221111.1",
6 | "typescript": "^4.9.4",
7 | "wrangler": "2.6.2"
8 | },
9 | "private": true,
10 | "scripts": {
11 | "start": "wrangler dev",
12 | "deploy": "wrangler publish"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/miniflare/example/src/index.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Welcome to Cloudflare Workers! This is your first worker.
3 | *
4 | * - Run `wrangler dev src/index.ts` in your terminal to start a development server
5 | * - Open a browser tab at http://localhost:8787/ to see your worker in action
6 | * - Run `wrangler publish src/index.ts --name my-worker` to publish your worker
7 | *
8 | * Learn more at https://developers.cloudflare.com/workers/
9 | */
10 |
11 | export interface Env {
12 | // Example binding to KV. Learn more at https://developers.cloudflare.com/workers/runtime-apis/kv/
13 | // MY_KV_NAMESPACE: KVNamespace;
14 | //
15 | // Example binding to Durable Object. Learn more at https://developers.cloudflare.com/workers/runtime-apis/durable-objects/
16 | // MY_DURABLE_OBJECT: DurableObjectNamespace;
17 | //
18 | // Example binding to R2. Learn more at https://developers.cloudflare.com/workers/runtime-apis/r2/
19 | // MY_BUCKET: R2Bucket;
20 | }
21 |
22 | export default {
23 | async fetch(
24 | request: Request,
25 | env: Env,
26 | ctx: ExecutionContext
27 |   ): Promise<Response> {
28 | return new Response("Hello World - Miniflare in LocalStack!");
29 | },
30 | };
31 |
--------------------------------------------------------------------------------
/miniflare/example/wrangler.toml:
--------------------------------------------------------------------------------
1 | name = "hello"
2 | main = "src/index.ts"
3 | compatibility_date = "2022-12-14"
4 |
--------------------------------------------------------------------------------
/miniflare/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/miniflare/logo.png
--------------------------------------------------------------------------------
/miniflare/miniflare/__init__.py:
--------------------------------------------------------------------------------
1 | name = "helloworld"
2 |
--------------------------------------------------------------------------------
/miniflare/miniflare/config.py:
--------------------------------------------------------------------------------
1 | HANDLER_PATH_MINIFLARE = "/miniflare"
2 |
--------------------------------------------------------------------------------
/miniflare/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-miniflare
3 | version = 0.1.3
4 | summary = LocalStack Extension: Miniflare
5 | description = This extension makes Miniflare (dev environment for Cloudflare workers) available directly in LocalStack
6 | long_description = file: README.md
7 | long_description_content_type = text/markdown
8 | url = https://github.com/localstack/localstack-extensions/tree/main/miniflare
9 | author = Waldemar Hummer
10 | author_email = waldemar@localstack.cloud
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 |
16 | [options.extras_require]
17 | dev =
18 | localstack-core>=1.0.0
19 |
20 | [options.entry_points]
21 | localstack.extensions =
22 | miniflare = miniflare.extension:MiniflareExtension
23 |
--------------------------------------------------------------------------------
/miniflare/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/openai/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017+ LocalStack contributors
2 | Copyright (c) 2016 Atlassian Pty Ltd
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
--------------------------------------------------------------------------------
/openai/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | $(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | lint: ## Run code linter to check code style
21 | ($(VENV_RUN); python -m pflake8 --show-source)
22 |
23 | format: ## Run black and isort code formatter
24 | $(VENV_RUN); python -m isort .; python -m black .
25 |
26 | install: venv
27 | $(VENV_RUN); python -m pip install -e .[dev]
28 |
29 | dist: venv
30 | $(VENV_RUN); python setup.py sdist bdist_wheel
31 |
32 | publish: clean-dist venv dist
33 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
34 |
35 | clean-dist: clean
36 | rm -rf dist/
37 |
38 | .PHONY: clean clean-dist dist install publish
39 |
--------------------------------------------------------------------------------
/openai/README.md:
--------------------------------------------------------------------------------
1 | # LocalStack OpenAI Extension
2 |
3 | 
4 | 
5 | [](https://travis-ci.com/yourusername/localstack-openai-mock)
6 |
7 | This is a LocalStack extension that allows you to mock the OpenAI API for testing and development purposes. It provides a convenient way to interact with a mock OpenAI service locally using LocalStack.
8 |
9 | ## Installation
10 |
11 | You can install this extension directly using the LocalStack extension manager:
12 |
13 | ```bash
14 | localstack extensions install localstack-extension-openai
15 | ```
16 |
17 | ## Using
18 |
19 | Once installed, you can access the OpenAI Mock API through `localhost:4510/v1`.
20 |
21 | ### Example
22 |
23 | ```python
24 |
25 | import openai
26 | openai.organization = "org-test"
27 | openai.api_key = "test"
28 | openai.api_base = "http://localhost:4510/v1"
29 |
30 | completion = openai.ChatCompletion.create(
31 | model="gpt-3.5-turbo",
32 | messages=[
33 | {"role": "system", "content": "You are a helpful assistant."},
34 | {"role": "user", "content": "Hello!"}
35 | ]
36 | )
37 | print(completion.choices)
38 | ```
39 |
40 | ## Coverage
41 | - [x] Chat completion
42 | - [x] Engines Listing
43 | - [x] Transcribe
44 | - [x] Translate
45 | - [x] Generate Image URL
46 | - [ ] Generate Image Base64
47 | - [ ] Embeddings
48 | - [ ] Fine Tuning
49 | - [ ] Files
50 | - [ ] Moderations
51 |
52 |
53 |
54 | ## Authors
55 | **Cristopher Pinzon** cristopher.pinzon@localstack.cloud
56 |
57 |
58 | ## Licensing
59 | * The extension code is licensed under the Apache 2.0 License
60 |
61 | ### Thank you for using the LocalStack OpenAI Extension!
62 |
--------------------------------------------------------------------------------
/openai/localstack_openai/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.1.0"
2 |
--------------------------------------------------------------------------------
/openai/localstack_openai/extension.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack import config
4 | from localstack.extensions.api import Extension, http
5 | from rolo.router import RuleAdapter, WithHost
6 | from werkzeug.routing import Submount
7 |
8 | LOG = logging.getLogger(__name__)
9 |
10 |
11 | class LocalstackOpenAIExtension(Extension):
12 | name = "openai"
13 |
14 | submount = "/_extension/openai"
15 | subdomain = "openai"
16 |
17 | def on_extension_load(self):
18 | logging.getLogger("localstack_openai").setLevel(
19 | logging.DEBUG if config.DEBUG else logging.INFO
20 | )
21 |
22 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
23 | from localstack_openai.mock_openai import Api
24 |
25 | api = RuleAdapter(Api())
26 |
27 |         # add path routes, e.g. localhost:4566/_extension/openai/v1/chat/completions
28 | router.add(
29 | [
30 | Submount(self.submount, [api]),
31 | WithHost(f"{self.subdomain}.{config.LOCALSTACK_HOST.host}<__host__>", [api]),
32 | ]
33 | )
34 |
35 | LOG.info(
36 | "OpenAI mock available at %s%s", str(config.LOCALSTACK_HOST).rstrip("/"), self.submount
37 | )
38 | LOG.info("OpenAI mock available at %s", f"{self.subdomain}.{config.LOCALSTACK_HOST}")
39 |
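Given the submount registered above, the mock can also be exercised with a plain HTTP client instead of the `openai` SDK. A minimal sketch, assuming LocalStack is running locally with the extension installed (the `Authorization` header value is an arbitrary placeholder):

```python
import requests

# Chat-completions request against the mock, routed via the /_extension/openai
# submount (the same base path used in openai/tests/test_api.py).
resp = requests.post(
    "http://localhost:4566/_extension/openai/v1/chat/completions",
    headers={"Authorization": "Bearer test"},
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello!"}],
    },
)
print(resp.status_code, resp.json())
```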
--------------------------------------------------------------------------------
/openai/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line_length = 100
3 | include = '(localstack_openai|tests)/.*\.py$'
4 |
5 | [tool.isort]
6 | profile = 'black'
7 | line_length = 100
8 |
9 | [tool.flake8]
10 | max-line-length = 100
11 | ignore = 'E501'
12 | exclude = './setup.py,.venv*,dist,build'
--------------------------------------------------------------------------------
/openai/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-openai
3 | version = attr: localstack_openai.__version__
4 | url = https://github.com/localstack/localstack-extensions/tree/main/openai
5 | author = Cristopher Pinzon
6 | author_email = cristopher.pinzon@localstack.cloud
7 | summary = LocalStack Extension: OpenAI
8 | description = OpenAI extension for LocalStack
9 | long_description = file: README.md
10 | long_description_content_type = text/markdown; charset=UTF-8
11 | license = Apache License 2.0
12 | classifiers =
13 | Development Status :: 5 - Production/Stable
14 | License :: OSI Approved :: Apache Software License
15 | Operating System :: OS Independent
16 | Programming Language :: Python :: 3
17 | Programming Language :: Python :: 3.8
18 | Programming Language :: Python :: 3.9
19 | Topic :: Software Development :: Libraries
20 | Topic :: Utilities
21 |
22 | [options]
23 | zip_safe = False
24 | packages = find:
25 | install_requires =
26 | faker>=8.12.1
27 | plux>=1.3
28 | rolo>=0.3
29 | test_requires =
30 | openai>=0.10.2,<1.0
31 | pytest>=6.2.4
32 |
33 | [options.extras_require]
34 | dev =
35 | localstack-core>=3.1
36 | openai>=0.10.2,<1.0
37 | pytest>=6.2.4
38 | black==22.3.0
39 | isort==5.10.1
40 |
41 | [options.packages.find]
42 | exclude =
43 | tests*
44 |
45 | [options.package_data]
46 | * = *.md
47 |
48 | [options.entry_points]
49 | localstack.extensions =
50 | localstack_openai = localstack_openai.extension:LocalstackOpenAIExtension
--------------------------------------------------------------------------------
/openai/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | entry_points = {
5 | "localstack.extensions": [
6 | "localstack_openai=localstack_openai.extension:LocalstackOpenAIExtension"
7 | ],
8 | }
9 |
10 | setup(entry_points=entry_points)
--------------------------------------------------------------------------------
/openai/tests/sample.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/openai/tests/sample.wav
--------------------------------------------------------------------------------
/openai/tests/test_api.py:
--------------------------------------------------------------------------------
1 | import openai
2 |
3 | openai.organization = "org-test"
4 | openai.api_key = "test"
5 | # openai.api_base = "http://localhost:1323/v1"
6 | openai.api_base = "http://localhost:4566/_extension/openai/v1"
7 |
8 |
9 | def test_list_models():
10 | models = openai.Engine.list()
11 | assert len(models.data) > 0
12 |
13 |
14 | def test_chat_completion():
15 | completion = openai.ChatCompletion.create(
16 | model="gpt-3.5-turbo",
17 | messages=[
18 | {"role": "system", "content": "You are a helpful assistant."},
19 | {"role": "user", "content": "Hello!"},
20 | ],
21 | )
22 | assert len(completion.choices) > 0
23 |
24 |
25 | def test_transcribe():
26 | transcript = openai.Audio.transcribe("whisper-1", open("sample.wav", "rb"))
27 | assert len(transcript.text) > 0
28 |
29 |
30 | def test_translate():
31 | translate = openai.Audio.translate("whisper-1", open("sample.wav", "rb"))
32 | assert len(translate.text) > 0
33 |
34 |
35 | def test_generate_image():
36 | response = openai.Image.create(prompt="a white siamese cat", n=1, size="1024x1024")
37 | assert response["data"][0]["url"]
38 |
--------------------------------------------------------------------------------
/prometheus/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): pyproject.toml
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux wheel
11 | $(VENV_RUN); pip install --upgrade black isort pyproject-flake8 flake8-black flake8-isort
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | lint: venv
21 | $(VENV_RUN); python -m pflake8 --show-source
22 |
23 | format: venv
24 | $(VENV_RUN); python -m isort .; python -m black .
25 |
26 | install: venv
27 | $(VENV_RUN); python -m pip install -e .[dev]
28 |
29 | dist: venv
30 | $(VENV_RUN); python setup.py sdist bdist_wheel
31 |
32 | publish: clean-dist venv dist
33 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
34 |
35 | clean-dist: clean
36 | rm -rf dist/
37 |
38 | .PHONY: clean clean-dist dist install publish
39 |
--------------------------------------------------------------------------------
/prometheus/README.md:
--------------------------------------------------------------------------------
1 | # LocalStack Prometheus Metrics
2 | [](https://app.localstack.cloud/extensions/remote?url=git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-prometheus-metrics&subdirectory=prometheus)
3 |
4 | Instruments, collects, and exposes LocalStack metrics via a [Prometheus](https://prometheus.io/) endpoint.
5 |
6 | ## Installing
7 |
8 | ```bash
9 | localstack extensions install localstack-extension-prometheus-metrics
10 | ```
11 |
12 | **Note**: This plugin only supports LocalStack `>=v4.2`
13 |
14 | ## Usage
15 |
16 | Scrape metrics via the endpoint:
17 | ```bash
18 | curl localhost.localstack.cloud:4566/_extension/metrics
19 | ```
20 |
21 | ## Quickstart (Docker-Compose)
22 |
23 | See the documentation on [Automating extension installation](https://docs.localstack.cloud/user-guide/extensions/managing-extensions/#automating-extensions-installation) for more details.
24 |
25 | First, enable the extension by adding it to your LocalStack environment:
26 |
27 | ```yaml
28 | services:
29 | localstack:
30 | environment:
31 | - EXTENSION_AUTO_INSTALL=localstack-extension-prometheus-metrics
32 | ```
33 |
34 | Next, you'll need to spin up a Prometheus instance to run alongside your LocalStack container. A [configuration file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file) is required.
35 |
36 | ### Option 1: Using a Volume Mount (Recommended)
37 |
38 | Create `prometheus_config.yml`:
39 | ```yaml
40 | global:
41 | scrape_interval: 15s # Set the scrape interval to every 15 seconds
42 | scrape_timeout: 5s # Set the scrape request timeout to 5 seconds
43 | # Scrape configuration for LocalStack metrics
44 | scrape_configs:
45 | - job_name: 'localstack'
46 | static_configs:
47 | # Note: The target needs to match the LocalStack container name for the Prometheus container to resolve the endpoint.
48 | - targets: ['localstack:4566'] # Target the LocalStack Gateway.
49 | metrics_path: '/_extension/metrics' # Metrics are exposed via `/_extension/metrics` endpoint
50 | ```
51 |
52 | And mount it on startup in your `docker-compose.yml`:
53 | ```yaml
54 | services:
55 | # ... LocalStack container should be defined
56 | prometheus:
57 | image: prom/prometheus
58 | ports:
59 | - "9090:9090"
60 | volumes:
61 | - "./prometheus_config.yml:/etc/prometheus/prometheus.yml"
62 | ```
63 |
64 | ### Option 2: Inline Configuration
65 |
66 | Using the Docker Compose top-level [configs](https://docs.docker.com/reference/compose-file/configs/):
67 | ```yaml
68 | services:
69 | prometheus:
70 | image: prom/prometheus
71 | ports:
72 | - "9090:9090"
73 | configs:
74 | - source: prometheus_config
75 | target: /etc/prometheus/prometheus.yml
76 |
77 | configs:
78 | prometheus_config:
79 | content: |
80 | global:
81 | scrape_interval: 15s
82 | scrape_timeout: 5s
83 | scrape_configs:
84 | - job_name: 'localstack'
85 | static_configs:
86 | - targets: ['localstack:4566']
87 | metrics_path: '/_extension/metrics'
88 | ```
89 |
90 | ### Full Example
91 |
92 | ```yaml
93 | services:
94 | localstack:
95 | container_name: "${LOCALSTACK_DOCKER_NAME:-localstack-main}"
96 | image: localstack/localstack-pro # required for Pro
97 | ports:
98 | - "4566:4566" # LocalStack Gateway
99 | - "4510-4559:4510-4559" # external services port range
100 | - "443:443" # LocalStack HTTPS Gateway (Pro)
101 | environment:
102 | - LOCALSTACK_AUTH_TOKEN=${LOCALSTACK_AUTH_TOKEN:?} # required for Pro
103 | - DEBUG=${DEBUG:-0}
104 | - PERSISTENCE=${PERSISTENCE:-0}
105 | - EXTENSION_AUTO_INSTALL=localstack-extension-prometheus-metrics
106 | volumes:
107 | - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
108 | - "/var/run/docker.sock:/var/run/docker.sock"
109 |
110 | prometheus:
111 | image: prom/prometheus
112 | ports:
113 | - "9090:9090"
114 | volumes:
115 | - "./prometheus_config.yml:/etc/prometheus/prometheus.yml" # Assumes prometheus_config.yml exists in your CWD
116 | ```
117 |
118 | ## Metrics
119 |
120 | The Prometheus extension exposes various LocalStack and system metrics through the `/_extension/metrics` endpoint.
121 |
122 | For a complete list of available metrics, view the:
123 | - [LocalStack Metrics documentation](./docs/localstack_metrics.md)
124 | - [System Metrics documentation](./docs/system_metrics.md)
125 | - Otherwise, visit the endpoint directly at `localhost.localstack.cloud:4566/_extension/metrics` when LocalStack is running.
126 |
127 | We've also included a [collection of PromQL queries](./docs/event_analysis.md) that are useful for analyzing LocalStack event source mappings performance.
128 |
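If you want to inspect the exposed metrics programmatically rather than through Prometheus, the exposition text can be parsed with the official client library. A minimal sketch, assuming LocalStack is reachable on `localhost:4566` and that `requests` and `prometheus_client` are installed:

```python
import requests
from prometheus_client.parser import text_string_to_metric_families

raw = requests.get("http://localhost:4566/_extension/metrics").text

# Print the LocalStack-specific metric families and their types
for family in text_string_to_metric_families(raw):
    if family.name.startswith("localstack_"):
        print(family.name, family.type)
```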
129 | ## Licensing
130 |
131 | * [client_python](https://github.com/prometheus/client_python) is licensed under the Apache License version 2.
--------------------------------------------------------------------------------
/prometheus/docs/event_analysis.md:
--------------------------------------------------------------------------------
1 | # PromQL Queries for Event Processing Statistics
2 |
3 | The following queries can be used to analyse the performance of LocalStack's event processing capabilities.
4 |
5 | ## Average Propagation Delay from Event Source to Poller
6 |
7 | The average amount of time a record has to wait before being processed during the last 5 minutes. A high propagation delay indicates that our event pollers are taking too long to ingest new events from an event source.
8 |
9 | ```
10 | rate(localstack_event_propagation_delay_seconds_sum[5m]) / rate(localstack_event_propagation_delay_seconds_count[5m])
11 | ```
12 |
13 | **Example**:
14 | 
15 |
16 | ## Batch Efficiency
17 |
18 | A ratio showing how efficiently our pollers retrieve records from an event source relative to their configured maximum batch size. A higher number indicates that batch sizes could be increased.
19 |
20 | ```
21 | rate(localstack_batch_size_efficiency_ratio_sum[1m]) / rate(localstack_batch_size_efficiency_ratio_count[1m])
22 | ```
23 |
24 | Example:
25 | 
26 |
27 | ## Records Per Poll
28 |
29 | The average number of records pulled in by an event poller per minute. Used in conjunction with batch efficiency, this helps you interpret the performance of your batching configuration.
30 |
31 | ```
32 | rate(localstack_records_per_poll_sum[1m]) / rate(localstack_records_per_poll_count[1m])
33 | ```
34 |
35 | Example:
36 |
37 | 
38 |
39 | ## In-Flight Events
40 |
41 | Gauges how many events are currently being processed by a target at a given point in time. If event processing is slow, this is a good way of measuring back-pressure on the system.
42 |
43 | ```
44 | localstack_in_flight_events
45 | ```
46 |
47 | Example:
48 | 
49 |
50 | ## Event Processing Duration
51 |
52 | The average time per minute that targets spend processing events.
53 |
54 | ```
55 | rate(localstack_process_event_duration_seconds_sum[1m]) / rate(localstack_process_event_duration_seconds_count[1m])
56 | ```
57 |
58 | Example:
59 |
60 | 
61 |
62 | ## High Latency Event Processing
63 |
64 | Retrieve the 95th percentile of processing times in a 5m interval grouped by LocalStack service and operation. Useful for analysing the tail-latency of event processing since this is likely where bottlenecks in performance start to show.
65 |
66 | ```
67 | histogram_quantile(0.95, sum by(service, operation, le) (rate(localstack_request_processing_duration_seconds_bucket[5m])))
68 | ```
69 |
70 | Example:
71 | 
72 |
73 | ## Empty Poll Responses
74 |
75 | The approximate number of empty poll responses per minute, averaged over a 5-minute window.
76 |
77 | ```
78 | rate(localstack_poll_miss_total[5m]) * 60
79 | ```
80 |
81 | Example:
82 | 
83 |
84 | ## Number of LocalStack requests Processed
85 |
86 | The average number of requests processed by the LocalStack gateway per minute, grouped by service (e.g., SQS) and operation (e.g., ReceiveMessage).
87 |
88 | ```
89 | sum by(service, operation) (rate(localstack_request_processing_duration_seconds_count[1m]) * 60)
90 | ```
91 |
92 | Example:
93 | 
94 |
95 | ## In-Flight Requests Against LocalStack Gateway
96 |
97 | Measures how many requests the Kinesis, SQS, DynamoDB, and Lambda services are currently processing in a given minute interval. Useful for seeing how hard a given service is being hit, broken down by operation type.
98 |
99 | ```
100 | sum_over_time(localstack_in_flight_requests{service=~"dynamodb|kinesis|sqs|lambda"}[1m])
101 | ```
102 |
103 | Example:
104 | 
--------------------------------------------------------------------------------
/prometheus/docs/images/avg_propagation_delay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/avg_propagation_delay.png
--------------------------------------------------------------------------------
/prometheus/docs/images/batch_efficiency_ratio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/batch_efficiency_ratio.png
--------------------------------------------------------------------------------
/prometheus/docs/images/empty_poll_responses.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/empty_poll_responses.png
--------------------------------------------------------------------------------
/prometheus/docs/images/event_processing_duration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/event_processing_duration.png
--------------------------------------------------------------------------------
/prometheus/docs/images/high_latency_event_processing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/high_latency_event_processing.png
--------------------------------------------------------------------------------
/prometheus/docs/images/in_flight_events.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/in_flight_events.png
--------------------------------------------------------------------------------
/prometheus/docs/images/in_flight_requests.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/in_flight_requests.png
--------------------------------------------------------------------------------
/prometheus/docs/images/records_per_poll.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/records_per_poll.png
--------------------------------------------------------------------------------
/prometheus/docs/images/requests_processed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/docs/images/requests_processed.png
--------------------------------------------------------------------------------
/prometheus/docs/localstack_metrics.md:
--------------------------------------------------------------------------------
1 | # LocalStack Metrics
2 |
3 | ## LocalStack Core/Request Handling Metrics
4 |
5 | `localstack_request_processing_duration_seconds`
6 |
7 | - **Description:** Time spent processing LocalStack service requests. This is done at the handler chain and is calculated as the duration from first *request handler* to the final *response handler*.
8 | - **Labels:** `service`, `operation`, `status`, `status_code`
9 | - **Type:** histogram
10 |
11 | `localstack_in_flight_requests`
12 |
13 | - **Description:** Total number of currently in-flight requests. This is a live number, and will be influenced by the scraping interval.
14 | - **Labels:** `service`, `operation`
15 | - **Type:** gauge
16 |
17 | ## LocalStack Event Poll Operation Metrics
18 |
19 | `localstack_records_per_poll`
20 |
21 | - **Description:** Number of records/events received in each poll operation
22 | - **Labels:** `event_source`, `event_target`
23 | - **Type:** histogram
24 |
25 | `localstack_poll_events_duration_seconds`
26 |
27 | - **Description:** Duration of each poll call in seconds
28 | - **Labels:** `event_source`, `event_target`
29 | - **Type:** histogram
30 |
31 | `localstack_poll_miss_total`
32 |
33 | - **Description:** Count of poll events with empty responses
34 | - **Labels:** `event_source`, `event_target`
35 | - **Type:** counter
36 |
37 | `localstack_batch_size_efficiency_ratio`
38 |
39 | - **Description:** Ratio of records received to configured maximum batch size
40 | - **Labels:** `event_source`, `event_target`
41 | - **Type:** histogram
42 | - **Note:** This is useful for finding whether the configured batch size is efficiently pulling records. A higher number indicates that a configured `BatchSize` could be increased.
43 |
44 | `localstack_batch_window_efficiency_ratio` (Not currently instrumented)
45 |
46 | - **Description:** Ratio of poll duration to configured maximum batch window length
47 | - **Labels:** `event_source`, `event_target`
48 | - **Type:** histogram
49 | - **Note:** Measures what proportion of the configured maximum batch window (set by `MaximumBatchingWindowInSeconds`) was actually used before returning. A lower ratio indicates that events were received quickly without needing to wait for the full window duration and that a window could be decreased.
50 |
51 | ## LocalStack Event Processing Metrics
52 |
53 | `localstack_processed_events_total`
54 |
55 | - **Description:** Total number of events processed
56 | - **Labels:** `event_source`, `event_target`, `status`
57 | - **Type:** counter
58 |
59 | `localstack_in_flight_events`
60 |
61 | - **Description:** Total number of event batches currently being processed by the target
62 | - **Labels:** `event_source`, `event_target`
63 | - **Type:** gauge
64 |
65 | `localstack_event_propagation_delay_seconds`
66 |
67 | - **Description:** End-to-end latency from event creation (at the source) until just before the event is sent to a target for processing.
68 | - **Labels:** `event_source`, `event_target`
69 | - **Type:** histogram
70 |
71 | `localstack_event_processing_errors_total`
72 |
73 | - **Description:** Total number of event processing errors
74 | - **Labels:** `event_source`, `event_target`, `error_type`
75 | - **Type:** counter
76 |
--------------------------------------------------------------------------------
/prometheus/docs/system_metrics.md:
--------------------------------------------------------------------------------
1 | # System-level Metrics
2 |
3 | ## Garbage Collection Metrics
4 |
5 | `_gc_objects_collected_total`
6 |
7 | - **Description:** Number of objects collected during garbage collection
8 | - **Labels:** `generation`
9 | - **Type:** counter
10 |
11 | `_gc_objects_uncollectable_total`
12 |
13 | - **Description:** Number of uncollectable objects found during garbage collection
14 | - **Labels:** `generation`
15 | - **Type:** counter
16 |
17 | `_gc_collections_total`
18 |
19 | - **Description:** Number of times this generation was collected
20 | - **Labels:** `generation`
21 | - **Type:** counter
22 |
23 | ## Environment Metrics
24 |
25 | `_info`
26 |
27 | - **Description:** Platform information
28 | - **Labels:** `implementation`, `major`, `minor`, `patchlevel`, `version`
29 | - **Type:** gauge
30 |
31 | ## Process Metrics
32 |
33 | `process_virtual_memory_bytes`
34 |
35 | - **Description:** Virtual memory size in bytes
36 | - **Labels:** none
37 | - **Type:** gauge
38 |
39 | `process_resident_memory_bytes`
40 |
41 | - **Description:** Resident memory size in bytes
42 | - **Labels:** none
43 | - **Type:** gauge
44 |
45 | `process_start_time_seconds`
46 |
47 | - **Description:** Start time of the process since unix epoch in seconds
48 | - **Labels:** none
49 | - **Type:** gauge
50 |
51 | `process_cpu_seconds_total`
52 |
53 | - **Description:** Total user and system CPU time spent in seconds
54 | - **Labels:** none
55 | - **Type:** counter
56 |
57 | `process_open_fds`
58 |
59 | - **Description:** Number of open file descriptors
60 | - **Labels:** none
61 | - **Type:** gauge
62 |
63 | `process_max_fds`
64 |
65 | - **Description:** Maximum number of open file descriptors
66 | - **Labels:** none
67 | - **Type:** gauge
68 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/__init__.py:
--------------------------------------------------------------------------------
1 | name = "localstack_prometheus"
2 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/expose.py:
--------------------------------------------------------------------------------
1 | from localstack.extensions.api import http
2 | from prometheus_client.exposition import choose_encoder
3 |
4 |
5 | def retrieve_metrics(request: http.Request):
6 | """Expose the Prometheus metrics"""
7 |     _generate_latest_metrics, content_type = choose_encoder(request.headers.get("Accept", ""))
8 | data = _generate_latest_metrics()
9 | return http.Response(response=data, status=200, mimetype=content_type)
10 |
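For context, `choose_encoder` from `prometheus_client` selects the exposition format based on the accept header it is given: requests advertising the OpenMetrics content type get the OpenMetrics encoder, everything else falls back to the classic text format. A small standalone sketch:

```python
from prometheus_client.exposition import choose_encoder

# Default text exposition format (no OpenMetrics accept header)
_, content_type = choose_encoder("")
print(content_type)  # text/plain; version=0.0.4; charset=utf-8

# OpenMetrics exposition format
_, content_type = choose_encoder("application/openmetrics-text; version=1.0.0")
print(content_type)  # application/openmetrics-text; version=1.0.0; charset=utf-8
```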
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/extension.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack.aws.chain import (
4 | CompositeExceptionHandler,
5 | CompositeHandler,
6 | CompositeResponseHandler,
7 | )
8 | from localstack.extensions.api import Extension, http
9 |
10 | from localstack_prometheus.expose import retrieve_metrics
11 | from localstack_prometheus.handler import RequestMetricsHandler, ResponseMetricsHandler
12 | from localstack_prometheus.instruments.patch import (
13 | apply_lambda_tracking_patches,
14 | apply_poller_tracking_patches,
15 | )
16 |
17 | LOG = logging.getLogger(__name__)
18 |
19 |
20 | class PrometheusMetricsExtension(Extension):
21 | name = "prometheus"
22 |
23 | def on_extension_load(self):
24 | apply_lambda_tracking_patches()
25 | apply_poller_tracking_patches()
26 | LOG.debug("PrometheusMetricsExtension: extension is loaded")
27 |
28 | def on_platform_start(self):
29 | LOG.debug("PrometheusMetricsExtension: localstack is starting")
30 |
31 | def on_platform_ready(self):
32 | LOG.debug("PrometheusMetricsExtension: localstack is running")
33 |
34 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
35 | router.add("/_extension/metrics", retrieve_metrics)
36 | LOG.debug("Added /metrics endpoint for Prometheus metrics")
37 |
38 | def update_request_handlers(self, handlers: CompositeHandler):
39 | # Prepend the RequestMetricsHandler to handlers ensuring it runs first
40 | handlers.handlers.insert(0, RequestMetricsHandler())
41 |
42 | def update_response_handlers(self, handlers: CompositeResponseHandler):
43 | # Insert the ResponseMetricsHandler as the final handler in the chain.
44 | handlers.handlers.append(ResponseMetricsHandler())
45 |
46 | def update_exception_handlers(self, handlers: CompositeExceptionHandler):
47 | # TODO
48 | pass
49 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/handler.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 |
4 | from localstack.aws.api import RequestContext
5 | from localstack.aws.chain import Handler, HandlerChain
6 | from localstack.http import Response
7 |
8 | from localstack_prometheus.metrics.core import (
9 | LOCALSTACK_IN_FLIGHT_REQUESTS,
10 | LOCALSTACK_REQUEST_PROCESSING_DURATION_SECONDS,
11 | )
12 |
13 | LOG = logging.getLogger(__name__)
14 |
15 |
16 | class TimedRequestContext(RequestContext):
17 | start_time: float | None
18 |
19 |
20 | class RequestMetricsHandler(Handler):
21 | """
22 | Handler that records the start time of incoming requests
23 | """
24 |
25 | def __call__(self, chain: HandlerChain, context: TimedRequestContext, response: Response):
26 | # Record the start time
27 | context.start_time = time.perf_counter()
28 |
29 | # Do not record metrics if no service operation information is found
30 | if not context.service_operation:
31 | return
32 |
33 | service, operation = context.service_operation
34 | LOCALSTACK_IN_FLIGHT_REQUESTS.labels(service=service, operation=operation).inc()
35 |
36 |
37 | class ResponseMetricsHandler(Handler):
38 | """
39 | Handler that records metrics when a response is ready
40 | """
41 |
42 | def __call__(self, chain: HandlerChain, context: TimedRequestContext, response: Response):
43 | # Do not record metrics if no service operation information is found
44 | if not context.service_operation:
45 | return
46 |
47 | service, operation = context.service_operation
48 | LOCALSTACK_IN_FLIGHT_REQUESTS.labels(service=service, operation=operation).dec()
49 |
50 | # Do not record if response is None
51 | if response is None:
52 | return
53 |
54 | # Do not record if no start_time attribute is found
55 | if not hasattr(context, "start_time") or context.start_time is None:
56 | return
57 |
58 | duration = time.perf_counter() - context.start_time
59 |
60 | if (ex := context.service_exception) is not None:
61 | status = ex.code
62 | else:
63 | status = "success"
64 |
65 | status_code = str(response.status_code)
66 |
67 | LOCALSTACK_REQUEST_PROCESSING_DURATION_SECONDS.labels(
68 | service=service,
69 | operation=operation,
70 | status=status,
71 | status_code=status_code,
72 | ).observe(duration)
73 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/localstack_prometheus/instruments/__init__.py
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/lambda_.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | from typing import ContextManager
3 |
4 | from localstack.services.lambda_.invocation.assignment import AssignmentService
5 | from localstack.services.lambda_.invocation.docker_runtime_executor import (
6 | DockerRuntimeExecutor,
7 | )
8 | from localstack.services.lambda_.invocation.execution_environment import (
9 | ExecutionEnvironment,
10 | )
11 | from localstack.services.lambda_.invocation.lambda_models import (
12 | FunctionVersion,
13 | InitializationType,
14 | )
15 |
16 | from localstack_prometheus.metrics.lambda_ import (
17 | LOCALSTACK_LAMBDA_ENVIRONMENT_ACTIVE,
18 | LOCALSTACK_LAMBDA_ENVIRONMENT_CONTAINERS_RUNNING,
19 | LOCALSTACK_LAMBDA_ENVIRONMENT_START_TOTAL,
20 | )
21 |
22 |
23 | def count_version_environments(
24 | assignment_service: AssignmentService, version_manager_id: str, prov_type: InitializationType
25 | ):
26 | """Count environments of a specific provisioning type for a specific version manager"""
27 | return sum(
28 | env.initialization_type == prov_type
29 | for env in assignment_service.environments.get(version_manager_id, {}).values()
30 | )
31 |
32 |
33 | def count_service_environments(
34 | assignment_service: AssignmentService, prov_type: InitializationType
35 | ):
36 | """Count environments of a specific provisioning type across all function versions"""
37 | return sum(
38 | count_version_environments(assignment_service, version_manager_id, prov_type)
39 | for version_manager_id in assignment_service.environments
40 | )
41 |
42 |
43 | def init_assignment_service_with_metrics(fn, self: AssignmentService):
44 | fn(self)
45 | # Initialise these once, with all subsequent calls being evaluated at collection time.
46 | LOCALSTACK_LAMBDA_ENVIRONMENT_ACTIVE.labels(
47 | provisioning_type="provisioned-concurrency"
48 | ).set_function(lambda: count_service_environments(self, "provisioned-concurrency"))
49 |
50 | LOCALSTACK_LAMBDA_ENVIRONMENT_ACTIVE.labels(provisioning_type="on-demand").set_function(
51 | lambda: count_service_environments(self, "on-demand")
52 | )
53 |
54 |
55 | def tracked_docker_start(fn, self: DockerRuntimeExecutor, env_vars: dict[str, str]):
56 | fn(self, env_vars)
57 | LOCALSTACK_LAMBDA_ENVIRONMENT_CONTAINERS_RUNNING.inc()
58 |
59 |
60 | def tracked_docker_stop(fn, self: DockerRuntimeExecutor):
61 | fn(self)
62 | LOCALSTACK_LAMBDA_ENVIRONMENT_CONTAINERS_RUNNING.dec()
63 |
64 |
65 | @contextlib.contextmanager
66 | def tracked_get_environment(
67 | fn,
68 | self: AssignmentService,
69 | version_manager_id: str,
70 | function_version: FunctionVersion,
71 | provisioning_type: InitializationType,
72 | ) -> ContextManager[ExecutionEnvironment]:
73 | applicable_env_count = count_version_environments(self, version_manager_id, provisioning_type)
74 | # If there are no applicable environments, this will be a cold start.
75 | # Otherwise, it'll be warm.
76 | start_type = "warm" if applicable_env_count > 0 else "cold"
77 | LOCALSTACK_LAMBDA_ENVIRONMENT_START_TOTAL.labels(
78 | start_type=start_type, provisioning_type=provisioning_type
79 | ).inc()
80 | with fn(self, version_manager_id, function_version, provisioning_type) as execution_env:
81 | yield execution_env
82 |
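For reference, `Gauge.set_function` from `prometheus_client` registers a callback that is evaluated lazily on every scrape, which is why the gauges above only need to be wired up once in the patched constructor. A minimal standalone sketch with a made-up gauge:

```python
from prometheus_client import Gauge, generate_latest

# Hypothetical gauge whose value is computed at collection time via a callback.
ACTIVE_WORKERS = Gauge("example_active_workers", "Currently active workers", ["pool"])

workers = {"default": ["w1", "w2"]}
ACTIVE_WORKERS.labels(pool="default").set_function(lambda: len(workers["default"]))

workers["default"].append("w3")
# The scrape output reflects the current value (3) because the callback runs now.
print(generate_latest().decode())
```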
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/patch.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack.services.lambda_.event_source_mapping.pollers.dynamodb_poller import (
4 | DynamoDBPoller,
5 | )
6 | from localstack.services.lambda_.event_source_mapping.pollers.kinesis_poller import (
7 | KinesisPoller,
8 | )
9 | from localstack.services.lambda_.event_source_mapping.pollers.sqs_poller import (
10 | SqsPoller,
11 | )
12 | from localstack.services.lambda_.event_source_mapping.senders.lambda_sender import (
13 | LambdaSender,
14 | )
15 | from localstack.services.lambda_.invocation.assignment import AssignmentService
16 | from localstack.services.lambda_.invocation.docker_runtime_executor import (
17 | DockerRuntimeExecutor,
18 | )
19 | from localstack.utils.patch import Patch, Patches
20 |
21 | from localstack_prometheus.instruments.lambda_ import (
22 | init_assignment_service_with_metrics,
23 | tracked_docker_start,
24 | tracked_docker_stop,
25 | tracked_get_environment,
26 | )
27 | from localstack_prometheus.instruments.poller import tracked_poll_events
28 | from localstack_prometheus.instruments.sender import tracked_send_events
29 | from localstack_prometheus.instruments.sqs_poller import tracked_sqs_handle_messages
30 | from localstack_prometheus.instruments.stream_poller import tracked_get_records
31 |
32 | LOG = logging.getLogger(__name__)
33 |
34 |
35 | def apply_lambda_tracking_patches():
36 | """Apply all Lambda environment metrics tracking patches in one call"""
37 | patches = Patches(
38 | [
39 | # Track starting and stopping of Lambda containers
40 | Patch.function(target=DockerRuntimeExecutor.start, fn=tracked_docker_start),
41 | Patch.function(target=DockerRuntimeExecutor.stop, fn=tracked_docker_stop),
42 | # Track cold and warm starts
43 | Patch.function(target=AssignmentService.get_environment, fn=tracked_get_environment),
44 | # Track and collect all active environments
45 | Patch.function(
46 | target=AssignmentService.__init__, fn=init_assignment_service_with_metrics
47 | ),
48 | ]
49 | )
50 |
51 | patches.apply()
52 | LOG.debug("Applied all Lambda environment tracking patches")
53 | return patches
54 |
55 |
56 | def apply_poller_tracking_patches():
57 | """Apply all poller metrics tracking patches in one call"""
58 | patches = Patches(
59 | [
60 | # Track entire poll_events function
61 | Patch.function(target=SqsPoller.poll_events, fn=tracked_poll_events),
62 | Patch.function(target=KinesisPoller.poll_events, fn=tracked_poll_events),
63 | Patch.function(target=DynamoDBPoller.poll_events, fn=tracked_poll_events),
64 | # Track when events get sent to the target lambda
65 | Patch.function(target=LambdaSender.send_events, fn=tracked_send_events),
66 | # TODO: Standardise a single abstract method that all Poller subclasses can use to fetch records
67 | # SQS-specific patches
68 | Patch.function(target=SqsPoller.handle_messages, fn=tracked_sqs_handle_messages),
69 | # Stream-specific patches
70 | Patch.function(target=KinesisPoller.get_records, fn=tracked_get_records),
71 | Patch.function(target=DynamoDBPoller.get_records, fn=tracked_get_records),
72 | # TODO: How should KafkaPollers be handled?
73 | ]
74 | )
75 |
76 | # TODO: Investigate patching subclasses of Poller and Sender to ensure all children have changes
77 | # since currently, Pipes Senders and Kafka Pollers are unsupported.
78 |
79 | patches.apply()
80 | LOG.debug("Applied all poller event and latency tracking patches")
81 | return patches
82 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/poller.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack.services.lambda_.event_source_mapping.pollers.poller import (
4 | EmptyPollResultsException,
5 | Poller,
6 | )
7 |
8 | from localstack_prometheus.instruments.util import get_event_target_from_processor
9 | from localstack_prometheus.metrics.event_polling import (
10 | LOCALSTACK_POLL_EVENTS_DURATION_SECONDS,
11 | LOCALSTACK_POLL_MISS_TOTAL,
12 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO,
13 | )
14 | from localstack_prometheus.metrics.event_processing import (
15 | LOCALSTACK_EVENT_PROCESSING_ERRORS_TOTAL,
16 | )
17 |
18 | LOG = logging.getLogger(__name__)
19 |
20 |
21 | def tracked_poll_events(fn, self: Poller):
22 | """Track metrics for poll_events operations"""
23 | event_source = self.event_source()
24 | event_target = get_event_target_from_processor(self.processor)
25 |
26 | try:
27 | with LOCALSTACK_POLL_EVENTS_DURATION_SECONDS.labels(
28 | event_source=event_source, event_target=event_target
29 | ).time():
30 | fn(self)
31 | except EmptyPollResultsException:
32 | # set to 0 since it's a batch-miss
33 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO.labels(
34 | event_source=event_source, event_target=event_target
35 | ).observe(0)
36 |
37 | LOCALSTACK_POLL_MISS_TOTAL.labels(
38 | event_source=event_source, event_target=event_target
39 | ).inc()
40 |
41 | raise
42 | except Exception as e:
43 | error_type = type(e).__name__
44 | LOCALSTACK_EVENT_PROCESSING_ERRORS_TOTAL.labels(
45 | event_source=event_source,
46 | event_target=event_target,
47 | error_type=error_type,
48 | ).inc()
49 | raise
50 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/sender.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 |
4 | from localstack.services.lambda_.event_source_mapping.senders.sender import Sender
5 |
6 | from localstack_prometheus.metrics.event_processing import (
7 | LOCALSTACK_EVENT_PROCESSING_ERRORS_TOTAL,
8 | LOCALSTACK_EVENT_PROPAGATION_DELAY_SECONDS,
9 | LOCALSTACK_IN_FLIGHT_EVENTS_GAUGE,
10 | LOCALSTACK_PROCESS_EVENT_DURATION_SECONDS,
11 | LOCALSTACK_PROCESSED_EVENTS_TOTAL,
12 | )
13 |
14 | LOG = logging.getLogger(__name__)
15 |
16 |
17 | def tracked_send_events(fn, self: Sender, events: list[dict] | dict):
18 | """Track metrics for event sending operations"""
19 | LOG.debug("Tracking send_events call with %d events", len(events))
20 | original_events = events.copy()
21 |
22 | if not events:
23 | # This shouldn't happen but cater for it anyway
24 | return fn(self, events)
25 |
26 | total_events = len(events)
27 | event_target = self.event_target()
28 |
29 | event_source = ""
30 | if isinstance(events, dict) and (es := events.get("eventSource")):
31 | event_source = es
32 | elif isinstance(events, list) and (es := events[0].get("eventSource")):
33 | event_source = es
34 |
35 | # HACK: Workaround for Kafka since events are a dict
36 | if event_source in {"aws:kafka", "SelfManagedKafka"} and isinstance(events, dict):
37 | # Need to flatten 2d array since records are split by topic-partition key
38 | events = sum(events.get("records", {}).values(), [])
39 |
40 | start_time = time.time()
41 | for event in events:
42 | if not isinstance(event, dict):
43 | continue
44 |
45 | if dynamodb := event.get("dynamodb", {}):
46 | if creation_time := dynamodb.get("ApproximateCreationDateTime"):
47 | delay = start_time - float(creation_time)
48 | LOCALSTACK_EVENT_PROPAGATION_DELAY_SECONDS.labels(
49 | event_source=event_source or "aws:dynamodb",
50 | event_target=event_target,
51 | ).observe(delay)
52 |
53 | elif kinesis := event.get("kinesis", {}):
54 | if arrival_time := kinesis.get("approximateArrivalTimestamp"):
55 | delay = start_time - float(arrival_time)
56 | LOCALSTACK_EVENT_PROPAGATION_DELAY_SECONDS.labels(
57 | event_source=event_source or "aws:kinesis",
58 | event_target=event_target,
59 | ).observe(delay)
60 |
61 | elif sqs_attributes := event.get("attributes", {}):
62 | if sent_timestamp := sqs_attributes.get("SentTimestamp"):
63 | delay = start_time - (float(sent_timestamp) / 1000.0)
64 | LOCALSTACK_EVENT_PROPAGATION_DELAY_SECONDS.labels(
65 | event_source=event_source or "aws:sqs", event_target=event_target
66 | ).observe(delay)
67 | elif event_source in {"aws:kafka", "SelfManagedKafka"}:
68 | if sent_timestamp := event.get("timestamp"):
69 | delay = start_time - (float(sent_timestamp) / 1000.0)
70 | LOCALSTACK_EVENT_PROPAGATION_DELAY_SECONDS.labels(
71 | event_source=event_source, event_target=event_target
72 | ).observe(delay)
73 |
74 | LOCALSTACK_IN_FLIGHT_EVENTS_GAUGE.labels(
75 | event_source=event_source,
76 | event_target=event_target,
77 | ).inc()
78 |
79 | try:
80 | with LOCALSTACK_PROCESS_EVENT_DURATION_SECONDS.labels(
81 | event_source=event_source, event_target=event_target
82 | ).time():
83 | result = fn(self, original_events)
84 | LOCALSTACK_PROCESSED_EVENTS_TOTAL.labels(
85 | event_source=event_source, event_target=event_target, status="success"
86 | ).inc(total_events)
87 |
88 | return result
89 |
90 | except Exception as e:
91 | error_type = type(e).__name__
92 | LOCALSTACK_EVENT_PROCESSING_ERRORS_TOTAL.labels(
93 | event_source=event_source, event_target=event_target, error_type=error_type
94 | ).inc()
95 |
96 | LOCALSTACK_PROCESSED_EVENTS_TOTAL.labels(
97 | event_source=event_source, event_target=event_target, status="error"
98 | ).inc(total_events)
99 | raise
100 | finally:
101 | LOCALSTACK_IN_FLIGHT_EVENTS_GAUGE.labels(
102 | event_source=event_source,
103 | event_target=event_target,
104 | ).dec()
105 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/sqs_poller.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from localstack.services.lambda_.event_source_mapping.pollers.sqs_poller import (
4 | SqsPoller,
5 | )
6 |
7 | from localstack_prometheus.instruments.util import get_event_target_from_processor
8 | from localstack_prometheus.metrics.event_polling import (
9 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO,
10 | LOCALSTACK_RECORDS_PER_POLL,
11 | )
12 |
13 | LOG = logging.getLogger(__name__)
14 |
15 |
16 | # TODO: Refactor all Pollers to use a common get_records method
17 | def tracked_sqs_handle_messages(fn, self: SqsPoller, messages: list[dict]):
18 | """SQS-specific handler for tracking and processing polled messages"""
19 | event_source = self.event_source()
20 | event_target = get_event_target_from_processor(self.processor)
21 |
22 | message_count = len(messages)
23 | if message_count > 0:
24 | LOCALSTACK_RECORDS_PER_POLL.labels(
25 | event_source=event_source,
26 | event_target=event_target,
27 | ).observe(message_count)
28 |
29 | if self.batch_size > 0:
30 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO.labels(
31 | event_source=event_source, event_target=event_target
32 | ).observe(message_count / self.batch_size)
33 |
34 | return fn(self, messages)
35 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/stream_poller.py:
--------------------------------------------------------------------------------
1 | from localstack.services.lambda_.event_source_mapping.pollers.stream_poller import (
2 | StreamPoller,
3 | )
4 |
5 | from localstack_prometheus.instruments.util import get_event_target_from_processor
6 | from localstack_prometheus.metrics.event_polling import (
7 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO,
8 | LOCALSTACK_POLLED_BATCH_WINDOW_EFFICIENCY_RATIO,
9 | LOCALSTACK_RECORDS_PER_POLL,
10 | )
11 |
12 |
13 | def tracked_get_records(fn, self: StreamPoller, shard_iterator: str):
14 | """Stream-specific handler for retrieving events from a shard iterator (DynamoDB Streams & Kinesis)"""
15 |
16 | event_source = self.event_source()
17 | event_target = get_event_target_from_processor(self.processor)
18 |
19 | with LOCALSTACK_POLLED_BATCH_WINDOW_EFFICIENCY_RATIO.labels(
20 | event_source=event_source, event_target=event_target
21 | ).time():
22 | response = fn(self, shard_iterator)
23 | records = response.get("Records", [])
24 | record_count = len(records)
25 |
26 | if record_count > 0:
27 | LOCALSTACK_RECORDS_PER_POLL.labels(
28 | event_source=event_source,
29 | event_target=event_target,
30 | ).observe(record_count)
31 |
32 | if (batch_size := self.stream_parameters.get("BatchSize")) and batch_size > 0:
33 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO.labels(
34 | event_source=event_source, event_target=event_target
35 | ).observe(record_count / batch_size)
36 |
37 | return response
38 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/instruments/util.py:
--------------------------------------------------------------------------------
1 | from localstack.services.lambda_.event_source_mapping.esm_event_processor import (
2 | EsmEventProcessor,
3 | EventProcessor,
4 | )
5 |
6 |
7 | def get_event_target_from_processor(processor: EventProcessor) -> str:
8 | if isinstance(processor, EsmEventProcessor):
9 | return "aws:lambda"
10 |
11 | if hasattr(processor, "event_target") and callable(processor.event_target):
12 | return processor.event_target()
13 |
14 | return "unknown"
15 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/prometheus/localstack_prometheus/metrics/__init__.py
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/metrics/core.py:
--------------------------------------------------------------------------------
1 | from prometheus_client import Gauge, Histogram
2 |
3 | # Core request handling metrics
4 | LOCALSTACK_REQUEST_PROCESSING_DURATION_SECONDS = Histogram(
5 | "localstack_request_processing_duration_seconds",
6 | "Time spent processing LocalStack service requests",
7 | ["service", "operation", "status", "status_code"],
8 | buckets=[0.005, 0.05, 0.5, 5, 30, 60, 300, 900, 3600],
9 | )
10 |
11 | LOCALSTACK_IN_FLIGHT_REQUESTS = Gauge(
12 | "localstack_in_flight_requests",
13 | "Total number of currently in-flight requests",
14 | ["service", "operation"],
15 | )
16 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/metrics/event_polling.py:
--------------------------------------------------------------------------------
1 | from prometheus_client import Counter, Histogram
2 |
3 | # Poll operation tracking
4 | LOCALSTACK_RECORDS_PER_POLL = Histogram(
5 | "localstack_records_per_poll",
6 | "Number of records/events received in each poll operation",
7 | ["event_source", "event_target"],
8 | buckets=[1, 10, 25, 50, 100, 250, 500, 1000, 10_000],
9 | )
10 |
11 | LOCALSTACK_POLL_EVENTS_DURATION_SECONDS = Histogram(
12 | "localstack_poll_events_duration_seconds",
13 | "Duration of each poll call in seconds",
14 | ["event_source", "event_target"],
15 | buckets=[0.005, 0.05, 0.5, 5, 30, 60, 300, 900, 3600],
16 | )
17 |
18 | LOCALSTACK_POLL_MISS_TOTAL = Counter(
19 | "localstack_poll_miss_total",
20 | "Count of poll events with empty responses.",
21 | ["event_source", "event_target"],
22 | )
23 |
24 | LOCALSTACK_POLLED_BATCH_SIZE_EFFICIENCY_RATIO = Histogram(
25 | "localstack_batch_size_efficiency_ratio",
26 | "Ratio of records received to configured maximum batch size",
27 | ["event_source", "event_target"],
28 | buckets=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
29 | )
30 |
31 | LOCALSTACK_POLLED_BATCH_WINDOW_EFFICIENCY_RATIO = Histogram(
32 | "localstack_batch_window_efficiency_ratio",
33 | "Ratio poll duration to configured maximum batch window length",
34 | ["event_source", "event_target"],
35 | buckets=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
36 | )
37 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/metrics/event_processing.py:
--------------------------------------------------------------------------------
1 | from prometheus_client import Counter, Gauge, Histogram
2 |
3 | # Event processing metrics
4 | LOCALSTACK_PROCESSED_EVENTS_TOTAL = Counter(
5 | "localstack_processed_events_total",
6 | "Total number of events processed",
7 | ["event_source", "event_target", "status"],
8 | )
9 |
10 | LOCALSTACK_PROCESS_EVENT_DURATION_SECONDS = Histogram(
11 | "localstack_process_event_duration_seconds",
12 | "Duration to process a polled event from start to completion",
13 | ["event_source", "event_target"],
14 | buckets=[0.005, 0.05, 0.5, 5, 30, 60, 300, 900, 3600],
15 | )
16 |
17 | LOCALSTACK_IN_FLIGHT_EVENTS_GAUGE = Gauge(
18 | "localstack_in_flight_events",
19 | "Total number of event batches currently being processed by the target",
20 | ["event_source", "event_target"],
21 | )
22 |
23 | # Performance and latency metrics
24 | LOCALSTACK_EVENT_PROPAGATION_DELAY_SECONDS = Histogram(
25 | "localstack_event_propagation_delay_seconds",
26 | "End-to-end latency between event creation (at source) until just before being sent to a target for processing.",
27 | ["event_source", "event_target"],
28 | buckets=[0.005, 0.05, 0.5, 5, 30, 60, 300, 900, 3600],
29 | )
30 |
31 | # Error tracking metrics
32 | LOCALSTACK_EVENT_PROCESSING_ERRORS_TOTAL = Counter(
33 | "localstack_event_processing_errors_total",
34 | "Total number of event processing errors",
35 | ["event_source", "event_target", "error_type"],
36 | )
37 |
--------------------------------------------------------------------------------
/prometheus/localstack_prometheus/metrics/lambda_.py:
--------------------------------------------------------------------------------
1 | from prometheus_client import Counter, Gauge
2 |
3 | # Lambda environment metrics
4 | LOCALSTACK_LAMBDA_ENVIRONMENT_START_TOTAL = Counter(
5 | "localstack_lambda_environment_start_total",
6 | "Total count of all Lambda environment starts.",
7 | ["start_type", "provisioning_type"],
8 | )
9 |
10 | LOCALSTACK_LAMBDA_ENVIRONMENT_CONTAINERS_RUNNING = Gauge(
11 | "localstack_lambda_environment_containers_running",
12 | "Number of LocalStack Lambda Docker containers currently running.",
13 | )
14 |
15 | LOCALSTACK_LAMBDA_ENVIRONMENT_ACTIVE = Gauge(
16 | "localstack_lambda_environments_active",
17 | "Number of currently active LocalStack Lambda environments.",
18 | ["provisioning_type"],
19 | )
20 |
--------------------------------------------------------------------------------
/prometheus/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", 'wheel', 'plux>=1.3.1']
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "localstack-extension-prometheus-metrics"
7 | version = "0.1.0"
8 | description = "LocalStack Extension: Prometheus Metrics"
9 | readme = {file = "README.md", content-type = "text/markdown; charset=UTF-8"}
10 | requires-python = ">=3.9"
11 | license = {text = "Apache License 2.0"}
12 | authors = [
13 | { name = "Greg Furman", email = "greg.furman@localstack.cloud" }
14 | ]
15 | keywords = ["localstack", "localstack-extension", "extension", "prometheus", "metrics"]
16 | classifiers = []
17 | dependencies = [
18 | "prometheus-client (>=0.21.1,<0.22.0)",
19 | "localstack-core (>=4.2.0,<5.0.0)",
20 | ]
21 |
22 | [project.urls]
23 | Homepage = "https://github.com/localstack/localstack-extensions/tree/main/prometheus/README.md"
24 |
25 | [project.optional-dependencies]
26 | dev = [
27 | "localstack>=0.0.0.dev"
28 | ]
29 |
30 | [tool.black]
31 | line_length = 100
32 | include = '(localstack_prometheus/.*\.py$)'
33 |
34 | [tool.isort]
35 | profile = 'black'
36 | line_length = 100
37 |
38 | # call using pflake8
39 | [tool.flake8]
40 | max-line-length = 110
41 | ignore = 'E203,E266,E501,W503,F403'
42 | select = 'B,C,E,F,I,W,T4,B9'
43 | exclude = '.venv*,venv*,dist,*.egg-info,.git'
44 |
45 | [project.entry-points."localstack.extensions"]
46 | localstack_prometheus = "localstack_prometheus.extension:PrometheusMetricsExtension"
47 |
--------------------------------------------------------------------------------
/stripe/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 |
4 | VENV_ACTIVATE = . $(VENV_DIR)/bin/activate
5 |
6 |
7 | venv: $(VENV_DIR)/bin/activate
8 |
9 | $(VENV_DIR)/bin/activate: setup.cfg
10 | test -d $(VENV_DIR) || $(VENV_BIN) $(VENV_DIR)
11 | $(VENV_ACTIVATE); pip install -e ".[dev]" && pip uninstall -y localstack-stripe
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf build/
16 | rm -rf .eggs/
17 | rm -rf *.egg-info/
18 |
19 | clean-dist: clean
20 | rm -rf dist/
21 |
22 | format:
23 | ($(VENV_ACTIVATE); python -m isort .; python -m black . )
24 |
25 | build: venv
26 | $(VENV_ACTIVATE); python setup.py build
27 |
28 | test: venv
29 | $(VENV_ACTIVATE); python -m pytest
30 |
31 | dist: venv
32 | $(VENV_ACTIVATE); python setup.py sdist bdist_wheel
33 |
34 | install: venv
35 | $(VENV_ACTIVATE); python -m pip install -e .[dev]
36 |
37 | upload: venv dist
38 | $(VENV_ACTIVATE); pip install --upgrade twine; twine upload dist/*
39 |
40 | .PHONY: clean clean-dist format
41 |
--------------------------------------------------------------------------------
/stripe/README.md:
--------------------------------------------------------------------------------
1 | Stripe LocalStack extensions
2 | ============================
3 | [Install LocalStack Extension](https://app.localstack.cloud/extensions/remote?url=git+https://github.com/localstack/localstack-extensions/#egg=localstack-extension-stripe&subdirectory=stripe)
4 |
5 | A LocalStack extension that provides a mocked version of [Stripe](https://stripe.com) as a service.
6 |
7 | ## Installing
8 |
9 |
10 | ```bash
11 | localstack extensions install localstack-extension-stripe
12 | ```
13 |
14 | ## Using
15 |
16 | Once installed, you can query the mocked Stripe API either through the path prefix `localhost:4566/stripe` or
17 | through the host `stripe.localhost.localstack.cloud:4566`. An example of each is shown below.
18 |
19 | ```bash
20 | curl stripe.localhost.localstack.cloud:4566/v1/customers \
21 | -u sk_test_12345: \
22 | -d description="Customer data for Alice"
23 | ```
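
The same API should also be reachable under the `/stripe` path prefix on the edge port. As a sketch (this assumes the `/stripe` prefix is stripped before the request is forwarded to the mocked backend, and reuses the dummy `sk_test_12345` key from above):

```bash
# same request as above, but going through the path-based route on the edge port
curl localhost:4566/stripe/v1/customers \
  -u sk_test_12345: \
  -d description="Customer data for Alice"
```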
24 |
25 | ## Licensing
26 |
27 | * [localstripe](https://github.com/adrienverge/localstripe) is licensed under
28 | the GNU General Public License version 3.
29 | * localstack-extension-stripe (this project) does not modify localstripe in
30 | any way.
31 |
--------------------------------------------------------------------------------
/stripe/localstack_stripe/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.2.0"
2 |
--------------------------------------------------------------------------------
/stripe/localstack_stripe/extension.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | import logging
3 |
4 | from localstack.extensions.api import Extension, http, services
5 |
6 | LOG = logging.getLogger(__name__)
7 |
8 |
9 | class LocalstripeExtension(Extension):
10 | name = "localstripe"
11 |
12 | backend_url: str
13 |
14 | def on_platform_start(self):
15 | # start localstripe when localstack starts
16 | from . import localstripe
17 |
18 | port = services.external_service_ports.reserve_port()
19 | self.backend_url = f"http://localhost:{port}"
20 |
21 | localstripe.start(port)
22 | atexit.register(localstripe.shutdown)
23 |
24 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
25 | # a ProxyHandler forwards all incoming requests to the backend URL
26 | endpoint = http.ProxyHandler(self.backend_url)
27 |
28 | # add path routes for localhost:4566/stripe (backwards compatibility)
29 | router.add(
30 | "/stripe",
31 | endpoint=endpoint,
32 | )
33 | router.add(
34 | "/stripe/",
35 | endpoint=endpoint,
36 | )
37 | # modern mounts
38 | router.add(
39 | "/_extension/stripe",
40 | endpoint=endpoint,
41 | )
42 | router.add(
43 | "/_extension/stripe/",
44 | endpoint=endpoint,
45 | )
46 | # add alternative host routes for stripe.localhost.localstack.cloud:4566
47 | router.add(
48 | "/",
49 | host="stripe.localhost.localstack.cloud:",
50 | endpoint=endpoint,
51 | )
52 | router.add(
53 | "/",
54 | host="stripe.localhost.localstack.cloud:",
55 | endpoint=endpoint,
56 | )
57 |
--------------------------------------------------------------------------------
/stripe/localstack_stripe/localstripe.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | from multiprocessing import Process
4 | from typing import Optional
5 |
6 | from localstripe.server import start as start_localstripe
7 |
8 | LOG = logging.getLogger(__name__)
9 |
10 | _process: Optional[Process] = None
11 |
12 |
13 | def _serve(port: int):
14 | sys.argv = [__file__, "--port", str(port)]
15 | return start_localstripe()
16 |
17 |
18 | def start(port: int) -> Process:
19 | global _process
20 | if _process:
21 | return _process
22 |
23 | LOG.info("starting localstripe server on port %s", port)
24 | _process = Process(target=_serve, args=(port,), daemon=True)
25 | _process.start()
26 | return _process
27 |
28 |
29 | def shutdown():
30 | global _process
31 | if not _process:
32 | return
33 | LOG.info("shutting down localstripe server")
34 |
35 | _process.terminate()
36 | _process = None
37 |
--------------------------------------------------------------------------------
/stripe/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/stripe/logo.png
--------------------------------------------------------------------------------
/stripe/pyproject.toml:
--------------------------------------------------------------------------------
1 | # project configuration
2 |
3 | [tool.black]
4 | line_length = 100
5 | include = '((localstack_stripe)/.*\.py$|tests/.*\.py$)'
6 | #extend_exclude = '()'
7 |
8 | [tool.isort]
9 | profile = 'black'
10 | #extend_skip = []
11 | line_length = 100
12 |
--------------------------------------------------------------------------------
/stripe/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = localstack-extension-stripe
3 | version = attr: localstack_stripe.__version__
4 | url = https://github.com/localstack/localstack-extensions/tree/main/stripe
5 | author = Thomas Rausch
6 | author_email = thomas@localstack.cloud
7 | summary = LocalStack Extension: Stripe
8 | description = A LocalStack extension that provides a mocked version of Stripe as a service
9 | long_description = file: README.md
10 | long_description_content_type = text/markdown; charset=UTF-8
11 | license = Apache License 2.0
12 | classifiers =
13 | Development Status :: 5 - Production/Stable
14 | License :: OSI Approved :: Apache Software License
15 | Operating System :: OS Independent
16 | Programming Language :: Python :: 3
17 | Programming Language :: Python :: 3.8
18 | Programming Language :: Python :: 3.9
19 | Topic :: Software Development :: Libraries
20 | Topic :: Utilities
21 |
22 | [options]
23 | zip_safe = False
24 | packages = find:
25 | setup_requires =
26 | setuptools
27 | wheel
28 | plux>=1.3
29 | install_requires =
30 | plux>=1.3
31 | localstack-localstripe>=1.15.6
32 | test_requires =
33 | pytest>=6.2.4
34 |
35 | [options.extras_require]
36 | dev =
37 | localstack-core>=1.0
38 | pytest>=6.2.4
39 | black==22.3.0
40 | isort==5.10.1
41 |
42 | [options.packages.find]
43 | exclude =
44 | tests*
45 |
46 | [options.package_data]
47 | * = *.md
48 |
--------------------------------------------------------------------------------
/stripe/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | entry_points = {
5 | "localstack.extensions": [
6 | "localstripe=localstack_stripe.extension:LocalstripeExtension"
7 | ],
8 | }
9 |
10 | setup(entry_points=entry_points)
11 |
--------------------------------------------------------------------------------
/template/README.md:
--------------------------------------------------------------------------------
1 | Extension Template
2 | ==================
3 |
4 | > [!NOTE]
5 | > This template is used for LocalStack CLI versions <= 3.6.0. For later versions, see https://github.com/localstack/localstack-extensions/tree/main/templates
6 |
7 | This is a [cookiecutter](https://github.com/cookiecutter/cookiecutter) template that is used when you invoke:
8 |
9 | ```console
10 | localstack extensions dev new
11 | ```
12 |
13 | It contains a simple Python distribution config and some boilerplate extension code.
14 |
--------------------------------------------------------------------------------
/template/cookiecutter.json:
--------------------------------------------------------------------------------
1 | {
2 | "project_name": "My LocalStack Extension",
3 | "project_short_description": "All the boilerplate you need to create a LocalStack extension.",
4 | "project_slug": "{{ cookiecutter.project_name.lower().replace(' ', '-') }}",
5 | "module_name": "{{ cookiecutter.project_slug.replace('-', '_') }}",
6 | "full_name": "Jane Doe",
7 | "email": "jane@example.com",
8 | "github_username": "janedoe",
9 | "version": "0.1.0"
10 | }
--------------------------------------------------------------------------------
/template/{{cookiecutter.project_slug}}/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): setup.py setup.cfg
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux
11 | touch $(VENV_DIR)/bin/activate
12 |
13 | clean:
14 | rm -rf .venv/
15 | rm -rf build/
16 | rm -rf .eggs/
17 | rm -rf *.egg-info/
18 |
19 | install: venv
20 | $(VENV_RUN); python -m pip install -e .[dev]
21 |
22 | dist: venv
23 | $(VENV_RUN); python setup.py sdist bdist_wheel
24 |
25 | publish: clean-dist venv dist
26 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
27 |
28 | clean-dist: clean
29 | rm -rf dist/
30 |
31 | .PHONY: clean clean-dist dist install publish
32 |
--------------------------------------------------------------------------------
/template/{{cookiecutter.project_slug}}/README.md:
--------------------------------------------------------------------------------
1 | {{ cookiecutter.project_name }}
2 | ===============================
3 |
4 | {{ cookiecutter.project_short_description }}
5 |
6 | ## Install local development version
7 |
8 | To install the extension into LocalStack in developer mode, you will need Python 3.10 and a virtual environment created in the extension project.
9 |
10 | In the newly generated project, simply run
11 |
12 | ```bash
13 | make install
14 | ```
15 |
16 | Then, to enable the extension for LocalStack, run
17 |
18 | ```bash
19 | localstack extensions dev enable .
20 | ```
21 |
22 | You can then start LocalStack with `EXTENSION_DEV_MODE=1` to load all enabled extensions:
23 |
24 | ```bash
25 | EXTENSION_DEV_MODE=1 localstack start
26 | ```
27 |
28 | ## Install from GitHub repository
29 |
30 | To distribute your extension, simply upload it to your GitHub account. Your extension can then be installed via:
31 |
32 | ```bash
33 | localstack extensions install "git+https://github.com/{{cookiecutter.github_username }}/{{ cookiecutter.project_slug }}/#egg={{ cookiecutter.project_slug }}"
34 | ```
35 |
--------------------------------------------------------------------------------
/template/{{cookiecutter.project_slug}}/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = {{ cookiecutter.project_slug }}
3 | version = {{ cookiecutter.version }}
4 | summary = LocalStack Extension: {{ cookiecutter.project_name }}
5 | url = https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.project_slug }}
6 | author = {{ cookiecutter.full_name }}
7 | author_email = {{ cookiecutter.email }}
8 | description = {{ cookiecutter.project_short_description }}
9 | long_description = file: README.md
10 | long_description_content_type = text/markdown; charset=UTF-8
11 |
12 | [options]
13 | zip_safe = False
14 | packages = find:
15 |
16 | [options.extras_require]
17 | dev =
18 | localstack-core>=1.0
19 |
20 | [options.entry_points]
21 | localstack.extensions =
22 | {{ cookiecutter.project_slug }} = {{ cookiecutter.module_name }}.extension:MyExtension
23 |
--------------------------------------------------------------------------------
/template/{{cookiecutter.project_slug}}/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup()
5 |
--------------------------------------------------------------------------------
/template/{{cookiecutter.project_slug}}/{{cookiecutter.module_name}}/__init__.py:
--------------------------------------------------------------------------------
1 | name = "{{ cookiecutter.module_name }}"
2 |
--------------------------------------------------------------------------------
/template/{{cookiecutter.project_slug}}/{{cookiecutter.module_name}}/extension.py:
--------------------------------------------------------------------------------
1 | from localstack.extensions.api import Extension, http, aws
2 |
3 | class MyExtension(Extension):
4 | name = "{{ cookiecutter.project_slug }}"
5 |
6 | def on_extension_load(self):
7 | print("MyExtension: extension is loaded")
8 |
9 | def on_platform_start(self):
10 | print("MyExtension: localstack is starting")
11 |
12 | def on_platform_ready(self):
13 | print("MyExtension: localstack is running")
14 |
15 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
16 | pass
17 |
18 | def update_request_handlers(self, handlers: aws.CompositeHandler):
19 | pass
20 |
21 | def update_response_handlers(self, handlers: aws.CompositeResponseHandler):
22 | pass
23 |
--------------------------------------------------------------------------------
/templates/basic/README.md:
--------------------------------------------------------------------------------
1 | Extension Template
2 | ==================
3 |
4 | This is a [cookiecutter](https://github.com/cookiecutter/cookiecutter) template that is used when you invoke:
5 |
6 | ```console
7 | localstack extensions dev new
8 | ```
9 |
10 | It contains a simple Python distribution config and some boilerplate extension code.
11 |
--------------------------------------------------------------------------------
/templates/basic/cookiecutter.json:
--------------------------------------------------------------------------------
1 | {
2 | "project_name": "My LocalStack Extension",
3 | "project_short_description": "All the boilerplate you need to create a LocalStack extension.",
4 | "project_slug": "{{ cookiecutter.project_name.lower().replace(' ', '-') }}",
5 | "module_name": "{{ cookiecutter.project_slug.replace('-', '_') }}",
6 | "class_name": "{{ cookiecutter.project_name.replace('-', ' ').replace('_', ' ').title().replace(' ', '') }}",
7 | "full_name": "Jane Doe",
8 | "email": "jane@example.com",
9 | "github_username": "janedoe",
10 | "version": "0.1.0"
11 | }
--------------------------------------------------------------------------------
/templates/basic/{{cookiecutter.project_slug}}/.gitignore:
--------------------------------------------------------------------------------
1 | .venv
2 | dist
3 | build
4 | **/*.egg-info
5 | .eggs
--------------------------------------------------------------------------------
/templates/basic/{{cookiecutter.project_slug}}/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 |
6 | venv: $(VENV_ACTIVATE)
7 |
8 | $(VENV_ACTIVATE): pyproject.toml
9 | test -d .venv || $(VENV_BIN) .venv
10 | $(VENV_RUN); pip install --upgrade pip setuptools plux
11 | $(VENV_RUN); pip install -e .[dev]
12 | touch $(VENV_DIR)/bin/activate
13 |
14 | clean:
15 | rm -rf .venv/
16 | rm -rf build/
17 | rm -rf .eggs/
18 | rm -rf *.egg-info/
19 |
20 | install: venv
21 | $(VENV_RUN); python -m plux entrypoints
22 |
23 | dist: venv
24 | $(VENV_RUN); python -m build
25 |
26 | publish: clean-dist venv dist
27 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
28 |
29 | clean-dist: clean
30 | rm -rf dist/
31 |
32 | .PHONY: clean clean-dist dist install publish
33 |
--------------------------------------------------------------------------------
/templates/basic/{{cookiecutter.project_slug}}/README.md:
--------------------------------------------------------------------------------
1 | {{ cookiecutter.project_name }}
2 | ===============================
3 |
4 | {{ cookiecutter.project_short_description }}
5 |
6 | ## Install local development version
7 |
8 | To install the extension into LocalStack in developer mode, you will need Python 3.10 and a virtual environment created in the extension project.
9 |
10 | In the newly generated project, simply run
11 |
12 | ```bash
13 | make install
14 | ```
15 |
16 | Then, to enable the extension for LocalStack, run
17 |
18 | ```bash
19 | localstack extensions dev enable .
20 | ```
21 |
22 | You can then start LocalStack with `EXTENSION_DEV_MODE=1` to load all enabled extensions:
23 |
24 | ```bash
25 | EXTENSION_DEV_MODE=1 localstack start
26 | ```
27 |
28 | ## Install from GitHub repository
29 |
30 | To distribute your extension, simply upload it to your GitHub account. Your extension can then be installed via:
31 |
32 | ```bash
33 | localstack extensions install "git+https://github.com/{{cookiecutter.github_username }}/{{ cookiecutter.project_slug }}/#egg={{ cookiecutter.project_slug }}"
34 | ```
35 |
--------------------------------------------------------------------------------
/templates/basic/{{cookiecutter.project_slug}}/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", 'wheel', 'plux>=1.3.1']
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "{{ cookiecutter.project_slug }}"
7 | version = "{{ cookiecutter.version }}"
8 | description = "LocalStack Extension: {{ cookiecutter.project_name }}"
9 | readme = {file = "README.md", content-type = "text/markdown; charset=UTF-8"}
10 | requires-python = ">=3.8"
11 | license = {text = "UNLICENSED"}
12 | authors = [
13 | { name = "{{ cookiecutter.full_name }}", email = "{{ cookiecutter.email }}" }
14 | ]
15 | keywords = ["localstack", "localstack-extension", "extension"]
16 | classifiers = []
17 | dependencies = [
18 | ]
19 |
20 | [project.urls]
21 | Homepage = "https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.project_slug }}"
22 |
23 | [project.optional-dependencies]
24 | dev = [
25 | "localstack>=0.0.0.dev"
26 | ]
27 |
28 | [project.entry-points."localstack.extensions"]
29 | {{ cookiecutter.module_name }} = "{{ cookiecutter.module_name }}.extension:{{ cookiecutter.class_name }}"
30 |
--------------------------------------------------------------------------------
/templates/basic/{{cookiecutter.project_slug}}/{{cookiecutter.module_name}}/__init__.py:
--------------------------------------------------------------------------------
1 | name = "{{ cookiecutter.module_name }}"
2 |
--------------------------------------------------------------------------------
/templates/basic/{{cookiecutter.project_slug}}/{{cookiecutter.module_name}}/extension.py:
--------------------------------------------------------------------------------
1 | from localstack.extensions.api import Extension, http, aws
2 |
3 | class {{ cookiecutter.class_name }}(Extension):
4 | name = "{{ cookiecutter.project_slug }}"
5 |
6 | def on_extension_load(self):
7 | print("MyExtension: extension is loaded")
8 |
9 | def on_platform_start(self):
10 | print("MyExtension: localstack is starting")
11 |
12 | def on_platform_ready(self):
13 | print("MyExtension: localstack is running")
14 |
15 | def update_gateway_routes(self, router: http.Router[http.RouteHandler]):
16 | pass
17 |
18 | def update_request_handlers(self, handlers: aws.CompositeHandler):
19 | pass
20 |
21 | def update_response_handlers(self, handlers: aws.CompositeResponseHandler):
22 | pass
23 |
--------------------------------------------------------------------------------
/templates/react/README.md:
--------------------------------------------------------------------------------
1 | Extension Template
2 | ==================
3 |
4 | This is a [cookiecutter](https://github.com/cookiecutter/cookiecutter) template that is used when you invoke:
5 |
6 | ```console
7 | localstack extensions dev new --template=react
8 | ```
9 |
10 | It contains a simple Python distribution config and some boilerplate extension code.
11 |
--------------------------------------------------------------------------------
/templates/react/cookiecutter.json:
--------------------------------------------------------------------------------
1 | {
2 | "project_name": "My LocalStack Extension",
3 | "project_short_description": "All the boilerplate you need to create a LocalStack extension.",
4 | "project_slug": "{{ cookiecutter.project_name.lower().replace(' ', '-') }}",
5 | "module_name": "{{ cookiecutter.project_slug.replace('-', '_') }}",
6 | "class_name": "{{ cookiecutter.project_name.replace('-', ' ').replace('_', ' ').title().replace(' ', '') }}",
7 | "full_name": "Jane Doe",
8 | "email": "jane@example.com",
9 | "github_username": "janedoe",
10 | "version": "0.1.0"
11 | }
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/.gitignore:
--------------------------------------------------------------------------------
1 | .venv
2 | frontend/node_modules
3 | frontend/.yarn
4 | dist
5 | build
6 | **/*.egg-info
7 | .eggs
8 | __pycache__
9 | *.pyc
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/Makefile:
--------------------------------------------------------------------------------
1 | VENV_BIN = python3 -m venv
2 | VENV_DIR ?= .venv
3 | VENV_ACTIVATE = $(VENV_DIR)/bin/activate
4 | VENV_RUN = . $(VENV_ACTIVATE)
5 | FRONTEND_FOLDER = frontend
6 | BACKEND_FOLDER = backend
7 | COREPACK_EXISTS := $(shell command -v corepack)
8 | YARN_EXISTS := $(shell command -v yarn)
9 |
10 |
11 | INFO_COLOR = \033[0;36m
12 | NO_COLOR = \033[m
13 |
14 | venv: $(VENV_ACTIVATE)
15 |
16 | $(VENV_ACTIVATE):
17 | test -d .venv || $(VENV_BIN) .venv
18 | $(VENV_RUN); pip install --upgrade pip setuptools plux build wheel
19 | $(VENV_RUN); pip install -e .[dev]
20 | touch $(VENV_DIR)/bin/activate
21 |
22 | check-frontend-deps:
23 | @if [ -z "$(YARN_EXISTS)" ]; then \
24 | npm install --global yarn; \
25 | fi
26 | @if [ -z "$(COREPACK_EXISTS)" ]; then \
27 | npm install -g corepack; \
28 | fi
29 |
30 | clean: ## Clean the project
31 | rm -rf .venv/
32 | rm -rf build/
33 | rm -rf .eggs/
34 | rm -rf $(BACKEND_FOLDER)/*.egg-info/
35 |
36 | install-backend: venv ## Install dependencies of the extension
37 | $(VENV_RUN); python -m plux entrypoints
38 |
39 | install-frontend: venv check-frontend-deps ## Install dependencies of the frontend
40 | cd $(FRONTEND_FOLDER) && yarn install
41 |
42 | build-frontend: # Build the React app
43 | @if [ ! -d "$(FRONTEND_FOLDER)/node_modules" ]; then \
44 | $(MAKE) install-frontend; \
45 | fi
46 | cd $(FRONTEND_FOLDER); rm -rf build && REACT_APP_DEVELOPMENT_ENVIRONMENT=false NODE_ENV=prod npm run build
47 |
48 | start-frontend: ## Start the frontend in dev mode (hot reload)
49 | cd $(FRONTEND_FOLDER); REACT_APP_DEVELOPMENT_ENVIRONMENT=true yarn start
50 |
51 | install: venv install-backend install-frontend ## Install dependencies
52 |
53 | dist: venv build-frontend ## Create distribution files
54 | $(VENV_RUN); python -m build
55 |
56 | publish: clean-dist venv dist ## Build and upload package to pypi
57 | $(VENV_RUN); pip install --upgrade twine; twine upload dist/*
58 |
59 | clean-dist: clean ## Remove dist folder
60 | rm -rf dist/
61 |
62 | help: ## Show this help
63 | @echo Please specify a build target. The choices are:
64 | @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "$(INFO_COLOR)%-30s$(NO_COLOR) %s\n", $$1, $$2}'
65 |
66 | .PHONY: clean clean-dist dist install install-backend install-frontend build-frontend start-frontend publish venv
67 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/README.md:
--------------------------------------------------------------------------------
1 | {{ cookiecutter.project_name }}
2 | ===============================
3 |
4 | {{ cookiecutter.project_short_description }}
5 |
6 | ## Install local development version
7 |
8 | To install the extension into LocalStack in developer mode, you will need Python 3.10+ and a virtual environment created in the extension project.
9 | You will also need to install [yarn](https://yarnpkg.com/getting-started/install) as the package manager, if you haven't already done so.
10 | In the newly generated project, simply run
11 |
12 | ```bash
13 | make install
14 | ```
15 |
16 | Then, to enable the extension for LocalStack, run
17 |
18 | ```bash
19 | localstack extensions dev enable .
20 | ```
21 |
22 | You can then start LocalStack with `EXTENSION_DEV_MODE=1` to load all enabled extensions:
23 |
24 | ```bash
25 | EXTENSION_DEV_MODE=1 localstack start
26 | ```
27 |
28 | ## Developing UI
29 | This template also generates a React UI, which is available at either {{ cookiecutter.project_name }}.localhost.localstack.cloud:4566/ or http://localhost.localstack.cloud:4566/_extension/{{ cookiecutter.project_name }}/.
30 |
31 | There are a few make commands that will help you work on the UI (see the example workflow below):
32 | - **build-frontend**: builds the React app into the frontend/build folder, which is then passed into the extension itself so the UI can be served. Remember to re-run this command whenever you want to see new changes in the extension.
33 | - **start-frontend**: starts a live dev server on port 3000 (by default) with hot reloading, for developing the UI locally outside the extension (it also builds the frontend).
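
As a rough sketch of a typical UI iteration loop (assuming the extension has already been installed and enabled in dev mode as described above):

```bash
# rebuild the React app so the extension serves the latest UI bundle
make build-frontend

# restart LocalStack with extension dev mode enabled to pick up the new build
EXTENSION_DEV_MODE=1 localstack start
```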
34 |
35 |
36 | ## Install from GitHub repository
37 |
38 | To distribute your extension, simply upload it to your GitHub account. Your extension can then be installed via:
39 |
40 | ```bash
41 | localstack extensions install "git+https://github.com/{{cookiecutter.github_username }}/{{ cookiecutter.project_slug }}/#egg={{ cookiecutter.project_slug }}"
42 | ```
43 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/backend.pth:
--------------------------------------------------------------------------------
1 | backend
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/backend/{{cookiecutter.module_name}}/__init__.py:
--------------------------------------------------------------------------------
1 | name = "{{ cookiecutter.module_name }}"
2 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/backend/{{cookiecutter.module_name}}/api/web.py:
--------------------------------------------------------------------------------
1 | from localstack.http import route, Request, Response
2 |
3 | from .. import static
4 |
5 | class WebApp:
6 | @route("/")
7 | def index(self, request: Request, *args, **kwargs):
8 | return Response.for_resource(static, "index.html")
9 |
10 | @route("/")
11 | def index2(self, request: Request, path: str, **kwargs):
12 | return Response.for_resource(static, path)
13 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/backend/{{cookiecutter.module_name}}/extension.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import typing as t
3 |
4 | from localstack.extensions.patterns.webapp import WebAppExtension
5 |
6 | from .api.web import WebApp
7 |
8 | LOG = logging.getLogger(__name__)
9 |
10 |
11 | class {{ cookiecutter.class_name }}(WebAppExtension):
12 | name = "{{ cookiecutter.project_slug }}"
13 |
14 | def __init__(self):
15 | super().__init__(template_package_path=None)
16 |
17 | def collect_routes(self, routes: list[t.Any]):
18 | routes.append(WebApp())
19 |
20 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/backend/{{cookiecutter.module_name}}/static/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/localstack/localstack-extensions/9519b7b9f7435618d5f9dc9895000fdf97000d1d/templates/react/{{cookiecutter.project_slug}}/backend/{{cookiecutter.module_name}}/static/__init__.py
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/frontend/.esbuild/esbuild.config.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable global-require */
2 |
3 | const esbuild = require('esbuild');
4 | const path = require('path');
5 |
6 | const SvgrPlugin = require('esbuild-plugin-svgr');
7 | const CopyPlugin = require('esbuild-plugin-copy').default;
8 | const CleanPlugin = require('esbuild-plugin-clean').default;
9 | const { NodeModulesPolyfillPlugin } = require('@esbuild-plugins/node-modules-polyfill');
10 |
11 | const packageJson = require('../package.json');
12 | const HtmlPlugin = require('./plugins/html');
13 | const { writeFileSync } = require('fs');
14 |
15 | const CURRENT_ENV = process.env.NODE_ENV || 'development.local';
16 | const BUILD_PATH = path.join(__dirname, '..', '..', 'backend', '{{cookiecutter.module_name}}', 'static');
17 |
18 | const BUILD_CONFIG = {
19 | entryPoints: [
20 | path.join(__dirname, '..', 'src', 'index.tsx'),
21 | path.join(__dirname, '..', 'src', 'index.html'),
22 | ],
23 | assetNames: '[name]-[hash]',
24 | entryNames: '[name]-[hash]',
25 | outdir: BUILD_PATH,
26 | bundle: true,
27 | minify: !CURRENT_ENV.includes('development.local'),
28 | sourcemap: true,
29 | target: 'es2015',
30 | metafile: true,
31 | // splitting: true,
32 | // set in case file loader is added below
33 | plugins: [
34 | CleanPlugin({
35 | patterns: [`${BUILD_PATH}/*`, `!${BUILD_PATH}/index.html`],
36 | sync: true,
37 | verbose: false,
38 | options: {
39 | force: true
40 | }
41 | }),
42 | SvgrPlugin({
43 | prettier: false,
44 | svgo: false,
45 | svgoConfig: {
46 | plugins: [{ removeViewBox: false }],
47 | },
48 | titleProp: true,
49 | ref: true,
50 | }),
51 | CopyPlugin({
52 | copyOnStart: true,
53 | // https://github.com/LinbuduLab/nx-plugins/issues/57
54 | assets: [
55 | {
56 | from: ['./public/*'],
57 | to: ['./'],
58 | },
59 | ],
60 | }),
61 | NodeModulesPolyfillPlugin(),
62 | HtmlPlugin({
63 | filename: path.join(BUILD_PATH, 'index.html'),
64 | env: true,
65 | }),
66 | ],
67 | inject: [path.join(__dirname, 'esbuild.shims.js')],
68 | define: {
69 | // Define replacements for env vars starting with `REACT_APP_`
70 | ...Object.entries(process.env).reduce(
71 | (memo, [name, value]) => name.startsWith('REACT_APP_') ?
72 | { ...memo, [`process.env.${name}`]: JSON.stringify(value) } :
73 | memo,
74 | {},
75 | ),
76 | 'process.cwd': 'dummyProcessCwd',
77 | global: 'window',
78 | },
79 | external: [
80 | ...Object.keys(packageJson.devDependencies || {}),
81 | ],
82 | loader: {
83 | '.md': 'text',
84 | '.gif': 'dataurl',
85 | }
86 | };
87 |
88 | const build = async (overrides = {}) => {
89 | try {
90 | await esbuild.build({ ...BUILD_CONFIG, ...overrides });
91 | writeFileSync(path.join(BUILD_PATH, '__init__.py'),'')
92 | console.log('done building');
93 | } catch (e) {
94 | console.error(e);
95 | process.exit(1);
96 | }
97 | };
98 |
99 | module.exports = { build };
100 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/frontend/.esbuild/esbuild.shims.js:
--------------------------------------------------------------------------------
1 | import * as React from 'react';
2 |
3 | export { React };
4 |
5 | export function dummyProcessCwd() {
6 | return '';
7 | };
8 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/frontend/.esbuild/index.js:
--------------------------------------------------------------------------------
1 | const { build, serve } = require('./esbuild.config');
2 |
3 | (async () => {
4 | if (process.argv.includes('--serve')) {
5 | await serve();
6 | } else if (process.argv.includes('--watch')) {
7 | await build({ watch: true });
8 | } else {
9 | await build();
10 | }
11 | })();
12 |
--------------------------------------------------------------------------------
/templates/react/{{cookiecutter.project_slug}}/frontend/.esbuild/plugins/html/index.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const path = require('path');
3 | const crypto = require('crypto');
4 |
5 | /**
6 | * @param {object} config
7 | * @param {string} config.filename - HTML file to process and override
8 | * @param {boolean} config.env - Whether to replace env vars or not (default - `false`)
9 | * @param {string} config.envPrefix - Limit env vars to pick (default - `REACT_APP_`)
10 | */
11 | const HtmlPlugin = (config) => ({
12 | name: 'html',
13 | setup(build) {
14 | build.onResolve({ filter: /\.html$/ }, args => ({
15 | path: path.resolve(args.resolveDir, args.path),
16 | namespace: 'html',
17 | }));
18 | build.onLoad({ filter: /.html/, namespace: 'html' }, (args) => {
19 | let htmlContent = fs.readFileSync(args.path).toString('utf-8');
20 |
21 | // replace env vars
22 | if (config.env) {
23 | const envPrefix = config.envPrefix || 'REACT_APP_';
24 | const envVars = Object.entries(process.env || {}).filter(([name]) => name.startsWith(envPrefix));
25 | htmlContent = envVars.reduce(
26 | (memo, [name, value]) => memo.replace(new RegExp(`%${name}%`, 'igm'), value),
27 | htmlContent,
28 | );
29 | }
30 |
31 | return {
32 | contents: htmlContent,
33 | loader: 'file'
34 | };
35 | });
36 |
37 | build.onEnd((result) => {
38 | const outFiles = Object.keys((result.metafile || {}).outputs);
39 | const jsFiles = outFiles.filter((p) => p.endsWith('.js'));
40 | const cssFiles = outFiles.filter((p) => p.endsWith('.css'));
41 | const htmlFiles = outFiles.filter((p) => p.endsWith('.html'));
42 |
43 | const headerAppends = cssFiles.reduce(
44 | (memo, p) => {
45 | const filename = p.split(path.sep).slice(-1)[0];
46 | return [...memo, `<link rel="stylesheet" href="${filename}">`];
47 | },
48 | [],
49 | );
50 |
51 | const bodyAppends = jsFiles.reduce(
52 | (memo, p) => {
53 | const filename = p.split(path.sep).slice(-1)[0];
54 | return [...memo, `<script src="${filename}"></script>`];
55 | },
56 | [],
57 | );
58 |
59 | for (const htmlFile of htmlFiles) {
60 | let htmlContent = fs.readFileSync(htmlFile).toString('utf-8');
61 |
62 | // replace env vars
63 | if (config.env) {
64 | const envPrefix = config.envPrefix || 'REACT_APP_';
65 | const envVars = Object.entries(process.env).filter(([name]) => name.startsWith(envPrefix));
66 |
67 | htmlContent = envVars.reduce(
68 | (memo, [name, value]) => memo.replace(new RegExp(`%${name}%`, 'igm'), value),
69 | htmlContent,
70 | );
71 | }
72 |
73 | // inject references to js and css files
74 | htmlContent = htmlContent
75 | .replace('</head>', [...headerAppends, '</head>'].join("\n"))
76 | .replace('</body>', [...bodyAppends, '</body>'].join("\n"));