├── .assets ├── demo.gif ├── demo.yml └── logo-dark.png ├── .github └── workflows │ ├── links-push.yml │ └── test.yaml ├── .gitignore ├── .vscode └── extensions.json ├── LICENSE ├── README.md ├── amqp.reflect ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── asyncapi.http.kafka.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml ├── http-asyncapi.yaml └── kafka-asyncapi.yaml ├── asyncapi.mqtt.kafka.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml ├── kafka-asyncapi.yaml └── mqtt-asyncapi.yaml ├── asyncapi.mqtt.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml └── mqtt-asyncapi.yaml ├── asyncapi.sse.kafka.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml ├── kafka-asyncapi.yaml └── sse-asyncapi.yaml ├── asyncapi.sse.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml └── sse-asyncapi.yaml ├── grpc.echo ├── .github │ └── test.sh ├── README.md ├── bench.json ├── compose.yaml └── etc │ ├── protos │ └── echo.proto │ └── zilla.yaml ├── grpc.kafka.echo ├── .github │ └── test.sh ├── README.md ├── bench.json ├── compose.yaml └── etc │ ├── protos │ └── echo.proto │ └── zilla.yaml ├── grpc.kafka.fanout ├── .github │ └── test.sh ├── .gitignore ├── README.md ├── binary.data ├── compose.yaml ├── etc │ ├── protos │ │ └── fanout.proto │ └── zilla.yaml └── grpc.reliable.streaming │ ├── .gitignore │ ├── .mvn │ └── wrapper │ │ ├── MavenWrapperDownloader.java │ │ └── maven-wrapper.properties │ ├── COPYRIGHT │ ├── mvnw │ ├── mvnw.cmd │ ├── pom.xml │ └── src │ └── main │ ├── java │ └── io │ │ └── aklivity │ │ └── zilla │ │ └── example │ │ └── grpc │ │ └── reliable │ │ └── streaming │ │ ├── LastMessageIdInterceptor.java │ │ └── ReliableStreaming.java │ ├── proto │ └── fanout.proto │ └── resources │ └── io │ └── aklivity │ └── zilla │ └── example │ └── grpc │ └── reliable │ └── streaming │ ├── certs │ └── test-ca.crt │ └── config │ └── retrying_service_config.json ├── grpc.kafka.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ ├── protos │ └── echo.proto │ └── zilla.yaml ├── grpc.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ ├── protos │ └── echo.proto │ ├── tls │ └── localhost.p12 │ └── zilla.yaml ├── http.filesystem ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml └── www │ └── index.html ├── http.json.schema ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml ├── nginx.conf └── www │ ├── demo.html │ ├── invalid.json │ ├── style.css │ └── valid.json ├── http.kafka.async ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── http.kafka.avro.json ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── http.kafka.cache ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── http.kafka.crud ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── http.kafka.oneway ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml ├── keystore │ └── kafka.keystore.jks └── truststore │ ├── ca-key │ └── kafka.truststore.jks ├── http.kafka.proto.json ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ ├── protos │ └── request.proto │ └── zilla.yaml ├── http.kafka.proto.oneway ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── encoded_input.bin └── etc │ ├── 
protos │ ├── request.proto │ ├── request_bad_type.proto │ ├── request_extra_field.proto │ └── request_wrong_order.proto │ └── zilla.yaml ├── http.kafka.sync ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── http.proxy.jwt ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml └── private.pem ├── http.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ ├── tls │ │ ├── localhost.p12 │ │ └── truststore.p12 │ └── zilla.yaml ├── nginx.conf ├── ssl │ ├── cert.pem │ └── key.pem └── www │ ├── demo.html │ └── style.css ├── mqtt.kafka.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── mqtt.proxy.jwt ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ ├── tls │ │ └── localhost.p12 │ └── zilla.yaml └── private.pem ├── openapi.asyncapi.kakfa.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml ├── http-openapi.yaml └── kafka-asyncapi.yaml ├── openapi.proxy ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml └── petstore-openapi.yaml ├── sse.jwt ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ ├── tls │ │ └── localhost.p12 │ └── zilla.yaml ├── private.pem ├── test-ca.crt └── www │ └── index.html ├── sse.kafka.fanout ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ └── zilla.yaml └── www │ └── index.html ├── tcp.echo ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── tcp.reflect ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml ├── tls.echo ├── .github │ └── test.sh ├── .gitignore ├── README.md ├── compose.yaml ├── etc │ ├── tls │ │ └── localhost.p12 │ └── zilla.yaml ├── localhost.p12 ├── regen └── test-ca.crt ├── tls.reflect ├── .github │ └── test.sh ├── README.md ├── compose.yaml ├── etc │ ├── tls │ │ └── localhost.p12 │ └── zilla.yaml └── test-ca.crt ├── ws.echo ├── .github │ └── test.sh ├── README.md ├── compose.yaml └── etc │ └── zilla.yaml └── ws.reflect ├── .github └── test.sh ├── README.md ├── compose.yaml └── etc └── zilla.yaml /.assets/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/.assets/demo.gif -------------------------------------------------------------------------------- /.assets/logo-dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/.assets/logo-dark.png -------------------------------------------------------------------------------- /.github/workflows/links-push.yml: -------------------------------------------------------------------------------- 1 | name: Link Checker 2 | 3 | on: 4 | push: 5 | repository_dispatch: 6 | workflow_dispatch: 7 | schedule: 8 | - cron: "0 0 * * *" 9 | 10 | jobs: 11 | linkChecker: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Link Checker 17 | uses: lycheeverse/lychee-action@v1.8.0 18 | with: 19 | fail: true 20 | args: --exclude-mail --exclude .+localhost.+ --max-concurrency 20 ./**/*.md 21 | env: 22 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 23 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 
1 | name: Test Examples 2 | 3 | on: 4 | pull_request: 5 | workflow_dispatch: 6 | inputs: 7 | zilla-image-tag: 8 | default: latest 9 | description: Zilla Image tag 10 | type: string 11 | 12 | workflow_call: 13 | inputs: 14 | zilla-image-tag: 15 | type: string 16 | 17 | jobs: 18 | get-examples-dirs: 19 | runs-on: ubuntu-latest 20 | outputs: 21 | all_directories: ${{ steps.all-files.outputs.folders_no_base_path }} 22 | steps: 23 | - uses: actions/checkout@v4 24 | with: 25 | fetch-depth: 0 26 | 27 | - name: Get examples dirs 28 | id: all-files 29 | uses: Drafteame/list-folders-action@main 30 | with: 31 | paths: | 32 | . 33 | omit: | 34 | ^\.github$ 35 | ^\.assets$ 36 | ^\.git$ 37 | ^\.vscode$ 38 | 39 | testing: 40 | strategy: 41 | matrix: 42 | dir: ${{ fromJson(needs.get-examples-dirs.outputs.all_directories) }} 43 | fail-fast: false 44 | needs: 45 | - get-examples-dirs 46 | runs-on: ubuntu-latest 47 | env: 48 | ZILLA_VERSION: ${{ inputs.zilla-image-tag }} 49 | steps: 50 | - name: Checkout 51 | uses: actions/checkout@v4 52 | with: 53 | sparse-checkout: ${{ matrix.dir }} 54 | 55 | - name: Cache Docker images 56 | if: ${{ hashFiles(format('{0}/compose.yaml', matrix.dir)) != '' }} 57 | uses: ScribeMD/docker-cache@0.5.0 58 | with: 59 | key: docker-${{ runner.os }}-${{ matrix.dir }}-${{ hashFiles(format('{0}/compose.yaml', matrix.dir)) }} 60 | 61 | - name: Start Zilla and wait for it to be healthy 62 | working-directory: ${{ matrix.dir }} 63 | run: docker compose up -d --wait 64 | 65 | - name: Execute Test 66 | if: ${{ hashFiles(format('{0}/.github/test.sh', matrix.dir)) != '' }} 67 | working-directory: ${{ matrix.dir }} 68 | run: | 69 | set -o pipefail 70 | ./.github/test.sh | tee $GITHUB_STEP_SUMMARY 71 | 72 | - name: Collect docker logs on failure 73 | if: failure() 74 | uses: jwalton/gh-docker-logs@v2 75 | with: 76 | dest: "./logs" 77 | - name: Tar logs 78 | if: failure() 79 | run: tar cvzf ./logs.tgz ./logs 80 | - name: Upload logs to GitHub 81 | if: failure() 82 | uses: actions/upload-artifact@v4 83 | with: 84 | name: ${{ matrix.dir }}_logs.tgz 85 | path: ./logs.tgz 86 | 87 | - name: Teardown 88 | if: always() && ${{ hashFiles(format('{0}/teardown.sh', matrix.dir)) != '' }} 89 | working-directory: ${{ matrix.dir }} 90 | run: docker compose down --remove-orphans 91 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _book/ 2 | !zilla/ 3 | .checkstyle 4 | .classpath 5 | .DS_Store 6 | .env 7 | .idea 8 | .project 9 | .settings/ 10 | .vault 11 | .zpm/ 12 | *-cs-cleanup.xml 13 | *-cs-formatter.xml 14 | *.epub 15 | *.iml 16 | *.ipr 17 | *.iws 18 | *.mobi 19 | *.pdf 20 | **/.mvn/timing.properties 21 | **/.mvn/wrapper/maven-wrapper.jar 22 | **/.zilla/engine 23 | buildNumber.properties 24 | dependency-reduced-pom.xml 25 | node_modules/ 26 | pom.xml.next 27 | pom.xml.releaseBackup 28 | pom.xml.tag 29 | pom.xml.versionsBackup 30 | release.properties 31 | target/ 32 | zilla 33 | zilla.yaml.generated 34 | *.out 35 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "aklivity.zilla-vscode-ext" 4 | ], 5 | "unwantedRecommendations": [ 6 | 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /amqp.reflect/compose.yaml: 
-------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-amqp-reflect} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7172:7172 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7172"] 14 | environment: 15 | ZILLA_INCUBATOR_ENABLED: "true" 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | networks: 21 | default: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- /amqp.reflect/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7172 11 | routes: 12 | - when: 13 | - port: 7172 14 | exit: north_amqp_server 15 | north_amqp_server: 16 | type: amqp 17 | kind: server 18 | routes: 19 | - when: 20 | - address: zilla 21 | exit: north_fan_server 22 | north_fan_server: 23 | type: fan 24 | kind: server 25 | exit: north_echo_server 26 | north_echo_server: 27 | type: echo 28 | kind: server 29 | telemetry: 30 | exporters: 31 | stdout_logs_exporter: 32 | type: stdout 33 | -------------------------------------------------------------------------------- /asyncapi.http.kafka.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT='{"name": "Rocky","id": 1}' 9 | EXPECTED="204" 10 | echo \# Testing asyncapi.http.kafka.proxy/POST 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | OUTPUT=$(curl -w "%{http_code}" --location "http://localhost:$PORT/pets" \ 18 | --header 'Content-Type: application/json' \ 19 | --data "$INPUT") 20 | RESULT=$? 21 | echo RESULT="$RESULT" 22 | 23 | # THEN 24 | echo OUTPUT="$OUTPUT" 25 | echo EXPECTED="$EXPECTED" 26 | echo 27 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 28 | echo ✅ 29 | else 30 | echo ❌ 31 | EXIT=1 32 | fi 33 | 34 | 35 | # GIVEN 36 | PORT="7114" 37 | INPUT='' 38 | EXPECTED='{"name": "Rocky","id": 1}' 39 | echo \# Testing asyncapi.http.kafka.proxy/GET 40 | echo PORT="$PORT" 41 | echo INPUT="$INPUT" 42 | echo EXPECTED="$EXPECTED" 43 | echo 44 | 45 | # WHEN 46 | OUTPUT=$(curl "http://localhost:$PORT/pets" \ 47 | --header 'Content-Type: application/json') 48 | RESULT=$? 
49 | echo RESULT="$RESULT" 50 | 51 | # THEN 52 | echo OUTPUT="$OUTPUT" 53 | echo EXPECTED="$EXPECTED" 54 | echo 55 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 56 | echo ✅ 57 | else 58 | echo ❌ 59 | EXIT=1 60 | fi 61 | 62 | exit $EXIT 63 | -------------------------------------------------------------------------------- /asyncapi.http.kafka.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-http-kafka-asyncapi 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | http: 9 | path: specs/http-asyncapi.yaml 10 | kafka: 11 | path: specs/kafka-asyncapi.yaml 12 | bindings: 13 | north_http_server: 14 | type: asyncapi 15 | kind: server 16 | options: 17 | specs: 18 | http_api: 19 | catalog: 20 | host_filesystem: 21 | subject: http 22 | exit: south_kafka_proxy 23 | south_kafka_proxy: 24 | type: asyncapi 25 | kind: proxy 26 | options: 27 | specs: 28 | http_api: 29 | catalog: 30 | host_filesystem: 31 | subject: http 32 | kafka_api: 33 | catalog: 34 | host_filesystem: 35 | subject: kafka 36 | routes: 37 | - when: 38 | - api-id: http_api 39 | operation-id: createPets 40 | exit: south_kafka_client 41 | with: 42 | api-id: kafka_api 43 | operation-id: addPet 44 | - when: 45 | - api-id: http_api 46 | exit: south_kafka_client 47 | with: 48 | api-id: kafka_api 49 | south_kafka_client: 50 | type: asyncapi 51 | kind: client 52 | options: 53 | specs: 54 | kafka_api: 55 | catalog: 56 | host_filesystem: 57 | subject: kafka 58 | telemetry: 59 | exporters: 60 | stdout_exporter: 61 | type: stdout 62 | -------------------------------------------------------------------------------- /asyncapi.mqtt.kafka.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7183" 8 | INPUT='{"lumens":50,"sentAt":"2024-06-07T12:34:32.000Z"}' 9 | EXPECTED='{"lumens":50,"sentAt":"2024-06-07T12:34:32.000Z"}' 10 | echo \# Testing asyncapi.mqtt.kafka.proxy 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | 18 | for i in $(seq 1 5); do 19 | docker compose -p zilla-asyncapi-mqtt-kafka-proxy exec -T mosquitto-cli \ 20 | mosquitto_pub --url mqtt://zilla.examples.dev:"$PORT"/zilla --message "Test" 21 | 22 | if [ $? -eq 0 ]; then 23 | echo "✅ Zilla is reachable." 24 | break 25 | fi 26 | 27 | sleep 2 28 | done 29 | 30 | OUTPUT=$( 31 | docker compose -p zilla-asyncapi-mqtt-kafka-proxy exec -T mosquitto-cli \ 32 | timeout 5s mosquitto_sub --url mqtt://zilla.examples.dev:"$PORT"/smartylighting/streetlights/1/0/event/+/lighting/measured & 33 | 34 | SUB_PID=$! 35 | 36 | sleep 1 37 | 38 | docker compose -p zilla-asyncapi-mqtt-kafka-proxy exec -T mosquitto-cli \ 39 | mosquitto_pub --url mqtt://zilla.examples.dev:"$PORT"/smartylighting/streetlights/1/0/event/1/lighting/measured --message "$INPUT" 40 | 41 | wait $SUB_PID 42 | ) 43 | 44 | RESULT=$? 
45 | echo RESULT="$RESULT" 46 | 47 | # THEN 48 | echo OUTPUT="$OUTPUT" 49 | echo EXPECTED="$EXPECTED" 50 | echo 51 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 52 | echo ✅ 53 | else 54 | echo ❌ 55 | EXIT=1 56 | fi 57 | 58 | exit $EXIT 59 | -------------------------------------------------------------------------------- /asyncapi.mqtt.kafka.proxy/README.md: -------------------------------------------------------------------------------- 1 | # asyncapi.mqtt.kafka.proxy 2 | 3 | In this guide, you create Kafka topics and use Zilla to mediate MQTT broker messages onto those topics. 4 | Zilla implements MQTT API defined in AsyncAPI specifications and uses Kafka API defined AsyncAPI proxy MQTT messages to Kafka. 5 | 6 | ## Setup 7 | 8 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 9 | 10 | ```bash 11 | docker compose up -d 12 | ``` 13 | 14 | ### Using this example 15 | 16 | Using eclipse-mosquitto subscribe to the `smartylighting/streetlights/1/0/event/+/lighting/measured` topic. 17 | 18 | ```bash 19 | docker compose -p zilla-asyncapi-mqtt-kafka-proxy exec -T mosquitto-cli \ 20 | mosquitto_sub --url mqtt://zilla.examples.dev:7183/smartylighting/streetlights/1/0/event/+/lighting/measured --debug 21 | ``` 22 | 23 | output: 24 | 25 | ```text 26 | Client null sending CONNECT 27 | Client 26c02b9a-0e29-44c6-9f0e-277655c8d712 received CONNACK (0) 28 | Client 26c02b9a-0e29-44c6-9f0e-277655c8d712 sending SUBSCRIBE (Mid: 1, Topic: smartylighting/streetlights/1/0/event/+/lighting/measured, QoS: 0, Options: 0x00) 29 | Client 26c02b9a-0e29-44c6-9f0e-277655c8d712 received SUBACK 30 | Subscribed (mid: 1): 0 31 | Client 26c02b9a-0e29-44c6-9f0e-277655c8d712 received PUBLISH (d0, q0, r0, m0, 'smartylighting/streetlights/1/0/event/5/lighting/measured', ... (49 bytes)) 32 | {"lumens":50,"sentAt":"2024-06-07T12:34:32.000Z"} 33 | ``` 34 | 35 | In a separate session, publish a valid message on the `smartylighting/streetlights/1/0/event/1/lighting/measured` topic. 36 | 37 | ```bash 38 | docker compose -p zilla-asyncapi-mqtt-kafka-proxy exec -T mosquitto-cli \ 39 | mosquitto_pub --url mqtt://zilla.examples.dev:7183/smartylighting/streetlights/1/0/event/1/lighting/measured --message '{"lumens":50,"sentAt":"2024-06-07T12:34:32.000Z"}' --debug 40 | ``` 41 | 42 | output: 43 | 44 | ``` 45 | Client null sending CONNECT 46 | Client a1f4ad8c-c9e8-4671-ad46-69030d4f1c9a received CONNACK (0) 47 | Client a1f4ad8c-c9e8-4671-ad46-69030d4f1c9a sending PUBLISH (d0, q0, r0, m1, 'smartylighting/streetlights/1/0/event/1/lighting/measured', ... (49 bytes)) 48 | Client a1f4ad8c-c9e8-4671-ad46-69030d4f1c9a sending DISCONNECT 49 | ``` 50 | 51 | Now attempt to publish an invalid message by setting `lumens` property to a negative value. 52 | 53 | ```bash 54 | docker compose -p zilla-asyncapi-mqtt-kafka-proxy exec -T mosquitto-cli \ 55 | mosquitto_pub --url mqtt://zilla.examples.dev:7183/smartylighting/streetlights/1/0/event/1/lighting/measured -m '{"lumens":-1,"sentAt":"2024-06-07T12:34:32.000Z"}' --repeat 2 --repeat-delay 3 --debug 56 | ``` 57 | 58 | output: 59 | 60 | ``` 61 | Client null sending CONNECT 62 | Client 30157eed-0ea7-42c6-91e8-466d1dd0ab66 received CONNACK (0) 63 | Client 30157eed-0ea7-42c6-91e8-466d1dd0ab66 sending PUBLISH (d0, q0, r0, m1, 'smartylighting/streetlights/1/0/event/1/lighting/measured', ... (49 bytes)) 64 | Received DISCONNECT (153) 65 | Error: The client is not currently connected. 
66 | ``` 67 | 68 | ## Teardown 69 | 70 | To remove any resources created by the Docker Compose stack, use: 71 | 72 | ```bash 73 | docker compose down 74 | ``` 75 | 76 | -------------------------------------------------------------------------------- /asyncapi.mqtt.kafka.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-mqtt-kafka-broker 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | mqtt: 9 | path: specs/mqtt-asyncapi.yaml 10 | kafka: 11 | path: specs/kafka-asyncapi.yaml 12 | bindings: 13 | north_mqtt_server: 14 | type: asyncapi 15 | kind: server 16 | options: 17 | specs: 18 | mqtt_api: 19 | catalog: 20 | host_filesystem: 21 | subject: mqtt 22 | exit: south_kafka_proxy 23 | south_kafka_proxy: 24 | type: asyncapi 25 | kind: proxy 26 | options: 27 | specs: 28 | mqtt_api: 29 | catalog: 30 | host_filesystem: 31 | subject: mqtt 32 | kafka_api: 33 | catalog: 34 | host_filesystem: 35 | subject: kafka 36 | mqtt-kafka: 37 | channels: 38 | sessions: mqttSessions 39 | retained: mqttRetained 40 | messages: mqttMessages 41 | routes: 42 | - when: 43 | - api-id: mqtt_api 44 | operation-id: turnOn 45 | exit: south_kafka_client 46 | with: 47 | api-id: kafka_api 48 | operation-id: toStreetlightData 49 | - when: 50 | - api-id: mqtt_api 51 | operation-id: receiveLightMeasurement 52 | exit: south_kafka_client 53 | with: 54 | api-id: kafka_api 55 | operation-id: onStreetlightData 56 | - when: 57 | - api-id: mqtt_api 58 | operation-id: sendLightMeasurement 59 | exit: south_kafka_client 60 | with: 61 | api-id: kafka_api 62 | operation-id: toStreetlightData 63 | - when: 64 | - api-id: mqtt_api 65 | operation-id: turnOff 66 | exit: south_kafka_client 67 | with: 68 | api-id: kafka_api 69 | operation-id: toStreetlightData 70 | - when: 71 | - api-id: mqtt_api 72 | operation-id: dimLight 73 | exit: south_kafka_client 74 | with: 75 | api-id: kafka_api 76 | operation-id: toStreetlightData 77 | south_kafka_client: 78 | type: asyncapi 79 | kind: client 80 | options: 81 | specs: 82 | kafka_api: 83 | catalog: 84 | host_filesystem: 85 | subject: kafka 86 | telemetry: 87 | exporters: 88 | stdout_logs_exporter: 89 | type: stdout 90 | -------------------------------------------------------------------------------- /asyncapi.mqtt.kafka.proxy/kafka-asyncapi.yaml: -------------------------------------------------------------------------------- 1 | asyncapi: 3.0.0 2 | info: 3 | title: Zilla Kafka Proxy 4 | version: 1.0.0 5 | license: 6 | name: Aklivity Community License 7 | servers: 8 | plain: 9 | host: kafka.examples.dev:29092 10 | protocol: kafka 11 | 12 | operations: 13 | onStreetlightData: 14 | action: receive 15 | channel: 16 | $ref: "#/channels/streetlightData" 17 | toStreetlightData: 18 | action: send 19 | channel: 20 | $ref: "#/channels/streetlightData" 21 | 22 | channels: 23 | streetlightData: 24 | description: This channel contains a message for streetlights. 25 | address: streetlights 26 | messages: 27 | streetlightData: 28 | $ref: "#/components/messages/streetlightData" 29 | mqttSessions: 30 | description: This channel contains MQTT sessions. 31 | address: mqtt-sessions 32 | mqttMessages: 33 | description: This channel contains MQTT messages. 34 | address: mqtt-messages 35 | mqttRetained: 36 | description: This channel contains MQTT retained messages. 
37 | address: mqtt-retained 38 | 39 | components: 40 | messages: 41 | streetlightData: 42 | payload: 43 | type: object 44 | properties: 45 | streetlightId: 46 | type: integer 47 | description: This property describes the id of the streetlight 48 | message: 49 | type: string 50 | description: This property describes message of the streetlight 51 | -------------------------------------------------------------------------------- /asyncapi.mqtt.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7183" 8 | INPUT="Hello, Zilla!" 9 | EXPECTED="" 10 | echo \# Testing asyncapi.sse.kafka.proxy/ 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | OUTPUT=$(echo "$INPUT" | nc -w 1 localhost $PORT) 18 | RESULT=$? 19 | echo RESULT="$RESULT" 20 | 21 | # THEN 22 | echo OUTPUT="$OUTPUT" 23 | echo EXPECTED="$EXPECTED" 24 | echo 25 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 26 | echo ✅ 27 | else 28 | echo ❌ 29 | EXIT=1 30 | fi 31 | 32 | # TODO remove once fixed 33 | echo '❌ Tested on main. and does not work with described instructions' 34 | echo 'Refer: https://github.com/aklivity/zilla/issues/1416' 35 | EXIT=1 36 | 37 | exit $EXIT 38 | -------------------------------------------------------------------------------- /asyncapi.mqtt.proxy/README.md: -------------------------------------------------------------------------------- 1 | # asyncapi.mqtt.proxy 2 | 3 | Listens on mqtt port `7183` and will forward mqtt publish messages and proxies subscribes to mosquitto MQTT broker listening on `1883` for topic `smartylighting/streetlights/1/0/event/+/lighting/measured`. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - mosquitto 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | Connect a subscribing client to mosquitto broker to port `1883`. Using mosquitto_pub client publish `{"id":"1","status":"on"}` to Zilla on port `7183`. Verify that the message arrived to on the first client. 21 | 22 | ```bash 23 | docker compose -p zilla-asyncapi-mqtt-proxy exec -T mosquitto-cli \ 24 | mosquitto_sub --url mqtt://zilla.examples.dev:7183/smartylighting/streetlights/1/0/event/+/lighting/measured --debug 25 | ``` 26 | 27 | output: 28 | 29 | ``` 30 | Client null sending CONNECT 31 | Client auto-5A1C0A41-0D16-497D-6C3B-527A93E421E6 received CONNACK (0) 32 | Client auto-5A1C0A41-0D16-497D-6C3B-527A93E421E6 sending SUBSCRIBE (Mid: 1, Topic: smartylighting/streetlights/1/0/event/+/lighting/measured, QoS: 0, Options: 0x00) 33 | Client auto-5A1C0A41-0D16-497D-6C3B-527A93E421E6 received SUBACK 34 | Subscribed (mid: 1): 0 35 | {"id":"1","status":"on"} 36 | ``` 37 | 38 | ```bash 39 | docker compose -p zilla-asyncapi-mqtt-proxy exec -T mosquitto-cli \ 40 | mosquitto_pub --url mqtt://zilla.examples.dev:7183/smartylighting/streetlights/1/0/event/1/lighting/measured --message '{"id":"1","status":"on"}' --debug 41 | ``` 42 | 43 | output: 44 | 45 | ``` 46 | Client null sending CONNECT 47 | Client 244684c7-fbaf-4e08-b382-a1a2329cf9ec received CONNACK (0) 48 | Client 244684c7-fbaf-4e08-b382-a1a2329cf9ec sending PUBLISH (d0, q0, r0, m1, 'smartylighting/streetlights/1/0/event/1/lighting/measured', ... 
(24 bytes)) 49 | Client 244684c7-fbaf-4e08-b382-a1a2329cf9ec sending DISCONNECT 50 | ``` 51 | 52 | Now attempt to publish an invalid message, with property `stat` instead of `status`. 53 | 54 | ```bash 55 | docker compose -p zilla-asyncapi-mqtt-proxy exec -T mosquitto-cli \ 56 | mosquitto_pub --url mqtt://zilla.examples.dev:7183/smartylighting/streetlights/1/0/event/1/lighting/measured --message '{"id":"1","stat":"off"}' --repeat 2 --repeat-delay 3 --debug 57 | ``` 58 | 59 | output: 60 | 61 | ``` 62 | Client null sending CONNECT 63 | Client e7e9ddb0-f8c9-43a0-840f-dab9981a9de3 received CONNACK (0) 64 | Client e7e9ddb0-f8c9-43a0-840f-dab9981a9de3 sending PUBLISH (d0, q0, r0, m1, 'smartylighting/streetlights/1/0/event/1/lighting/measured', ... (23 bytes)) 65 | Received DISCONNECT (153) 66 | Error: The client is not currently connected. 67 | ``` 68 | 69 | Note that the invalid message is rejected with error code `153` `payload format invalid`, and therefore not received by the subscriber. 70 | 71 | ## Teardown 72 | 73 | To remove any resources created by the Docker Compose stack, use: 74 | 75 | ```bash 76 | docker compose down 77 | ``` 78 | 79 | -------------------------------------------------------------------------------- /asyncapi.mqtt.proxy/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-asyncapi-mqtt-proxy} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7183:7183 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7183"] 14 | environment: 15 | MOSQUITTO_BROKER_HOST: mosquitto 16 | MOSQUITTO_BROKER_PORT: 1883 17 | volumes: 18 | - ./etc:/etc/zilla 19 | - ./mqtt-asyncapi.yaml:/etc/zilla/specs/mqtt-asyncapi.yaml 20 | command: start -v -e 21 | 22 | mosquitto: 23 | image: eclipse-mosquitto:2.0 24 | restart: unless-stopped 25 | ports: 26 | - 1883:1883 27 | configs: 28 | - source: mosquitto.conf 29 | target: /mosquitto/config/mosquitto.conf 30 | 31 | mosquitto-cli: 32 | image: eclipse-mosquitto:2.0 33 | command: "/bin/sh" 34 | stdin_open: true 35 | tty: true 36 | 37 | configs: 38 | mosquitto.conf: 39 | content: | 40 | # DO NOT USE IN PRODUCTION 41 | allow_anonymous true 42 | listener 1883 43 | protocol mqtt 44 | 45 | networks: 46 | default: 47 | driver: bridge 48 | -------------------------------------------------------------------------------- /asyncapi.mqtt.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-mqtt-kafka-broker 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | smartylighting: 9 | path: specs/mqtt-asyncapi.yaml 10 | bindings: 11 | mqtt_proxy_server: 12 | type: asyncapi 13 | kind: server 14 | options: 15 | specs: 16 | my-asyncapi-spec: 17 | catalog: 18 | host_filesystem: 19 | subject: smartylighting 20 | exit: mqtt_proxy_client 21 | mqtt_proxy_client: 22 | type: asyncapi 23 | kind: client 24 | options: 25 | specs: 26 | my-asyncapi-spec: 27 | catalog: 28 | host_filesystem: 29 | subject: smartylighting 30 | tcp: 31 | host: ${{env.MOSQUITTO_BROKER_HOST}} 32 | port: ${{env.MOSQUITTO_BROKER_PORT}} 33 | telemetry: 34 | exporters: 35 | stdout_logs_exporter: 36 | type: stdout 37 | -------------------------------------------------------------------------------- /asyncapi.mqtt.proxy/mqtt-asyncapi.yaml: 
-------------------------------------------------------------------------------- 1 | asyncapi: 3.0.0 2 | info: 3 | title: Zilla MQTT Proxy 4 | version: 1.0.0 5 | license: 6 | name: Aklivity Community License 7 | servers: 8 | plain: 9 | host: localhost:7183 10 | protocol: mqtt 11 | defaultContentType: application/json 12 | 13 | channels: 14 | smartylighting: 15 | address: "smartylighting/streetlights/1/0/event/1/lighting/measured" 16 | title: MQTT Topic to produce & consume topic. 17 | messages: 18 | item: 19 | $ref: "#/components/messages/item" 20 | 21 | operations: 22 | sendEvents: 23 | action: send 24 | channel: 25 | $ref: "#/channels/smartylighting" 26 | 27 | receiveEvents: 28 | action: receive 29 | channel: 30 | $ref: "#/channels/smartylighting" 31 | 32 | components: 33 | messages: 34 | item: 35 | name: event 36 | title: An event 37 | headers: 38 | type: object 39 | properties: 40 | idempotency-key: 41 | description: Unique identifier for a given event 42 | type: string 43 | id: 44 | description: Street Light ID 45 | type: string 46 | contentType: application/json 47 | payload: 48 | type: object 49 | properties: 50 | item: 51 | $ref: "#/components/schemas/item" 52 | schemas: 53 | item: 54 | type: object 55 | properties: 56 | id: 57 | type: string 58 | status: 59 | type: string 60 | required: 61 | - id 62 | - status 63 | -------------------------------------------------------------------------------- /asyncapi.sse.kafka.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | KAFKA_BOOTSTRAP_SERVER="kafka.examples.dev:29092" 9 | INPUT='{"id": 1,"name":"Hello World!"}' 10 | EXPECTED='data:{ "id": 1, "name": "Hello World!"}' 11 | echo \# Testing asyncapi.sse.kafka.proxy/ 12 | echo PORT="$PORT" 13 | echo KAFKA_BOOTSTRAP_SERVER="$KAFKA_BOOTSTRAP_SERVER" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | # send request to zilla 20 | timeout 3s curl -N --http2 -H "Accept:text/event-stream" "http://localhost:$PORT/events" | tee .testoutput & 21 | 22 | # push response to kafka with kafkacat 23 | echo "$INPUT" | 24 | docker compose -p zilla-http-kafka-sync exec -T kafkacat \ 25 | kafkacat -P \ 26 | -b $KAFKA_BOOTSTRAP_SERVER \ 27 | -t events \ 28 | -k "1" 29 | 30 | # fetch the output of zilla request; try 5 times 31 | for i in $(seq 0 2); do 32 | sleep $i 33 | OUTPUT=$(cat .testoutput | grep "^data:") 34 | if [ -n "$OUTPUT" ]; then 35 | break 36 | fi 37 | done 38 | rm .testoutput 39 | 40 | # THEN 41 | echo OUTPUT="$OUTPUT" 42 | echo EXPECTED="$EXPECTED" 43 | echo 44 | if [ "$OUTPUT" = "$EXPECTED" ]; then 45 | echo ✅ 46 | else 47 | echo ❌ 48 | EXIT=1 49 | fi 50 | 51 | # TODO remove once fixed 52 | echo '❌ curl: (52) Empty reply from server. Tested on main. and does not work with described instructions' 53 | echo 'Refer: https://github.com/aklivity/zilla/issues/1417' 54 | EXIT=1 55 | 56 | exit $EXIT 57 | -------------------------------------------------------------------------------- /asyncapi.sse.kafka.proxy/README.md: -------------------------------------------------------------------------------- 1 | # asyncapi.sse.kafka.proxy 2 | 3 | In this guide, you create Kafka topics and use Zilla to implement an SSE API where Zilla listens on http port 7114 and will stream back whatever is published to the events topic in Kafka. 
4 | Zilla is implementing the SSE endpoints defined in an AsyncAPI 3.x spec and proxying them onto Kafka topics defined in an AsyncAPI 3.x spec based on the operations defined in each spec. 5 | 6 | ## Setup 7 | 8 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 9 | 10 | ```bash 11 | docker compose up -d 12 | ``` 13 | 14 | ### Verify behaviour 15 | 16 | Using `curl` client connect to the SSE stream. 17 | 18 | ```bash 19 | curl -N --http2 -H "Accept:text/event-stream" "http://localhost:7114/events" 20 | ``` 21 | 22 | output: 23 | 24 | ``` 25 | * Trying 127.0.0.1:7114... 26 | * Connected to localhost (127.0.0.1) port 7114 (#0) 27 | > GET /events HTTP/1.1 28 | > Host: localhost:7114 29 | > User-Agent: curl/7.88.1 30 | > Connection: Upgrade, HTTP2-Settings 31 | > Upgrade: h2c 32 | > HTTP2-Settings: AAMAAABkAAQCAAAAAAIAAAAA 33 | > Accept:text/event-stream 34 | > 35 | < HTTP/1.1 200 OK 36 | < Content-Type: text/event-stream 37 | < Transfer-Encoding: chunked 38 | < Access-Control-Allow-Origin: * 39 | < 40 | id:AQIAAg== 41 | data:{ "id": 1, "name": "Hello World!"} 42 | ``` 43 | 44 | In another terminal window use `kafkacat` to publish to the `events` Kafka topic. 45 | 46 | ```bash 47 | echo '{ "id": 1, "name": "Hello World!"}' | docker compose -p zilla-asyncapi-sse-kafka-proxy exec -T kafkacat \ 48 | kafkacat -P \ 49 | -b kafka.examples.dev:29092 \ 50 | -k "1" -t events 51 | ``` 52 | 53 | On the `curl` client, the event should appear. 54 | 55 | ## Teardown 56 | 57 | To remove any resources created by the Docker Compose stack, use: 58 | 59 | ```bash 60 | docker compose down 61 | ``` 62 | 63 | -------------------------------------------------------------------------------- /asyncapi.sse.kafka.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-sse-kafka-asyncapi 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | sse: 9 | path: specs/sse-asyncapi.yaml 10 | kafka: 11 | path: specs/kafka-asyncapi.yaml 12 | bindings: 13 | north_sse_server: 14 | type: asyncapi 15 | kind: server 16 | options: 17 | specs: 18 | sse_api: 19 | catalog: 20 | host_filesystem: 21 | subject: sse 22 | exit: south_kafka_proxy 23 | south_kafka_proxy: 24 | type: asyncapi 25 | kind: proxy 26 | options: 27 | specs: 28 | sse_api: 29 | catalog: 30 | host_filesystem: 31 | subject: sse 32 | kafka_api: 33 | catalog: 34 | host_filesystem: 35 | subject: kafka 36 | routes: 37 | - when: 38 | - api-id: sse_api 39 | exit: south_kafka_client 40 | with: 41 | api-id: kafka_api 42 | south_kafka_client: 43 | type: asyncapi 44 | kind: client 45 | options: 46 | specs: 47 | kafka_api: 48 | catalog: 49 | host_filesystem: 50 | subject: kafka 51 | telemetry: 52 | exporters: 53 | stdout_exporter: 54 | type: stdout 55 | -------------------------------------------------------------------------------- /asyncapi.sse.kafka.proxy/kafka-asyncapi.yaml: -------------------------------------------------------------------------------- 1 | asyncapi: 3.0.0 2 | info: 3 | title: Eventstore Kafka API 4 | version: 1.0.0 5 | defaultContentType: application/json 6 | servers: 7 | plain: 8 | host: kafka.examples.dev:29092 9 | protocol: kafka 10 | channels: 11 | events: 12 | address: "events" 13 | messages: 14 | event: 15 | $ref: "#/components/messages/event" 16 | description: The topic on which event values may be produced and consumed. 
17 | operations: 18 | receiveEvents: 19 | action: receive 20 | channel: 21 | $ref: "#/channels/events" 22 | summary: >- 23 | List all events. 24 | traits: 25 | - $ref: "#/components/operationTraits/kafka" 26 | messages: 27 | - $ref: "#/channels/eventstore/messages/event" 28 | components: 29 | messages: 30 | event: 31 | name: Event 32 | title: Event 33 | summary: >- 34 | Inform about Event. 35 | contentType: application/json 36 | traits: 37 | - $ref: "#/components/messageTraits/commonHeaders" 38 | payload: 39 | $ref: "#/components/schemas/eventPayload" 40 | schemas: 41 | eventPayload: 42 | type: object 43 | properties: 44 | id: 45 | type: integer 46 | minimum: 0 47 | description: Event id. 48 | name: 49 | type: string 50 | description: Event name. 51 | messageTraits: 52 | commonHeaders: 53 | headers: 54 | type: object 55 | properties: 56 | my-app-header: 57 | type: integer 58 | minimum: 0 59 | maximum: 100 60 | operationTraits: 61 | kafka: 62 | bindings: 63 | kafka: 64 | clientId: 65 | type: string 66 | enum: 67 | - my-app-id 68 | -------------------------------------------------------------------------------- /asyncapi.sse.kafka.proxy/sse-asyncapi.yaml: -------------------------------------------------------------------------------- 1 | asyncapi: 3.0.0 2 | info: 3 | title: AsyncAPI Eventstore 4 | license: 5 | name: MIT 6 | version: 1.0.0 7 | servers: 8 | plain_sse: 9 | host: localhost:7114 10 | protocol: sse 11 | defaultContentType: application/json 12 | 13 | channels: 14 | events: 15 | address: /events 16 | messages: 17 | event: 18 | $ref: "#/components/messages/event" 19 | 20 | operations: 21 | receiveEvents: 22 | action: receive 23 | channel: 24 | $ref: "#/channels/events" 25 | 26 | components: 27 | schemas: 28 | eventPayload: 29 | type: object 30 | properties: 31 | id: 32 | type: integer 33 | minimum: 0 34 | description: Event id. 35 | name: 36 | type: string 37 | description: Event name. 38 | tag: 39 | type: string 40 | description: Tag. 41 | messages: 42 | event: 43 | name: Event 44 | title: Event 45 | summary: >- 46 | Inform about Event. 47 | contentType: application/json 48 | payload: 49 | $ref: "#/components/schemas/eventPayload" 50 | -------------------------------------------------------------------------------- /asyncapi.sse.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT_GOOD='{ "name": "event name", "data": { "id": 1, "name": "Hello World!" } }' 9 | INPUT_BAD='{ "name": "event name", "data": { "id": -1, "name": "Hello bad World!" } }' 10 | EXPECTED='data:{ "id": 1, "name": "Hello World!" 
}' 11 | echo \# Testing asyncapi.sse.proxy/ 12 | echo PORT="$PORT" 13 | echo INPUT_GOOD="$INPUT_GOOD" 14 | echo INPUT_BAD="$INPUT_BAD" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | # send request to zilla 20 | timeout 3s curl -N --http2 -H "Accept:text/event-stream" "http://localhost:$PORT/events/1" | tee .testoutput & 21 | 22 | # push response to kafka with kafkacat 23 | echo "$INPUT_GOOD" | nc -c localhost 7001 24 | echo "$INPUT_BAD" | nc -c localhost 7001 25 | 26 | # fetch the output of zilla request; try 5 times 27 | for i in $(seq 0 2); do 28 | sleep $i 29 | OUTPUT=$(cat .testoutput | grep "^data:") 30 | if [ -n "$OUTPUT" ]; then 31 | break 32 | fi 33 | done 34 | rm .testoutput 35 | 36 | # THEN 37 | echo OUTPUT="$OUTPUT" 38 | echo EXPECTED="$EXPECTED" 39 | echo 40 | if [ "$OUTPUT" = "$EXPECTED" ]; then 41 | echo ✅ 42 | else 43 | echo ❌ 44 | EXIT=1 45 | fi 46 | 47 | # TODO remove once fixed 48 | echo '❌ curl: (52) Empty reply from server. Tested on main. and does not work with described instructions' 49 | echo 'Refer: https://github.com/aklivity/zilla/issues/1417' 50 | EXIT=1 51 | 52 | exit $EXIT 53 | -------------------------------------------------------------------------------- /asyncapi.sse.proxy/README.md: -------------------------------------------------------------------------------- 1 | # asyncapi.sse.proxy 2 | 3 | Listens on http port `7114` and will stream back whatever is published to `sse_server` on tcp port `7001`. 4 | 5 | ## Requirements 6 | 7 | - nc 8 | - docker compose 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | Connect `curl` client first to Zilla over SSE. 21 | 22 | ```bash 23 | curl -N --http2 -H "Accept:text/event-stream" -v "http://localhost:7114/events/1" 24 | ``` 25 | 26 | output: 27 | 28 | ```text 29 | * Trying 127.0.0.1:7114... 30 | * Connected to localhost (127.0.0.1) port 7114 (#0) 31 | > GET /events/1 HTTP/1.1 32 | > Host: localhost:7114 33 | > User-Agent: curl/7.88.1 34 | > Connection: Upgrade, HTTP2-Settings 35 | > Upgrade: h2c 36 | > HTTP2-Settings: AAMAAABkAAQCAAAAAAIAAAAA 37 | > Accept:text/event-stream 38 | > 39 | < HTTP/1.1 200 OK 40 | < Content-Type: text/event-stream 41 | < Transfer-Encoding: chunked 42 | < Access-Control-Allow-Origin: * 43 | < 44 | event:event name 45 | data:{ "id": 1, "name": "Hello World!" } 46 | ``` 47 | 48 | From another terminal send an invalid data from `nc` client. Note that the invalid event will not arrive to the client. 49 | 50 | ```bash 51 | echo '{ "name": "event name", "data": { "id": -1, "name": "Hello World!" } }' | nc -c localhost 7001 52 | ``` 53 | 54 | Now send a valid event, where the id is non-negative and the message will arrive to `curl` client. 55 | 56 | ```bash 57 | echo '{ "name": "event name", "data": { "id": 1, "name": "Hello World!" 
} }' | nc -c localhost 7001 58 | ``` 59 | 60 | ## Teardown 61 | 62 | To remove any resources created by the Docker Compose stack, use: 63 | 64 | ```bash 65 | docker compose down 66 | ``` 67 | -------------------------------------------------------------------------------- /asyncapi.sse.proxy/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-asyncapi-sse-proxy} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | environment: 15 | SSE_SERVER_HOST: sse-server 16 | SSE_SERVER_PORT: 8001 17 | volumes: 18 | - ./etc:/etc/zilla 19 | - ./sse-asyncapi.yaml:/etc/zilla/specs/sse-asyncapi.yaml 20 | command: start -v -e 21 | 22 | sse-server: 23 | image: ghcr.io/aklivity/extras-sse-server:sha-42ad67e 24 | restart: unless-stopped 25 | ports: 26 | - 8001:8001 27 | - 7001:7001 28 | stdin_open: true 29 | tty: true 30 | healthcheck: 31 | interval: 5s 32 | timeout: 3s 33 | retries: 5 34 | test: netstat -an | grep 8001 > /dev/null; if [ 0 != $? ]; then exit 1; fi; 35 | command: -v -p 8001 -i 7001 36 | 37 | networks: 38 | default: 39 | driver: bridge 40 | -------------------------------------------------------------------------------- /asyncapi.sse.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-asyncapi-sse-proxy 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | eventstore: 9 | path: specs/sse-asyncapi.yaml 10 | bindings: 11 | sse_proxy_server: 12 | type: asyncapi 13 | kind: server 14 | options: 15 | specs: 16 | my-asyncapi-spec: 17 | catalog: 18 | host_filesystem: 19 | subject: eventstore 20 | exit: sse_proxy_client 21 | sse_proxy_client: 22 | type: asyncapi 23 | kind: client 24 | options: 25 | specs: 26 | my-asyncapi-spec: 27 | catalog: 28 | host_filesystem: 29 | subject: eventstore 30 | tcp: 31 | host: ${{env.SSE_SERVER_HOST}} 32 | port: ${{env.SSE_SERVER_PORT}} 33 | telemetry: 34 | exporters: 35 | stdout_logs_exporter: 36 | type: stdout 37 | -------------------------------------------------------------------------------- /asyncapi.sse.proxy/sse-asyncapi.yaml: -------------------------------------------------------------------------------- 1 | asyncapi: 3.0.0 2 | info: 3 | title: AsyncAPI Eventstore 4 | license: 5 | name: MIT 6 | version: 1.0.0 7 | servers: 8 | plain: 9 | host: localhost:7114 10 | protocol: sse 11 | defaultContentType: application/json 12 | 13 | channels: 14 | events: 15 | address: /events 16 | messages: 17 | event: 18 | $ref: "#/components/messages/event" 19 | showEventById: 20 | address: /events/{id} 21 | messages: 22 | event: 23 | $ref: "#/components/messages/event" 24 | 25 | operations: 26 | getEvents: 27 | action: receive 28 | channel: 29 | $ref: "#/channels/showEventById" 30 | 31 | components: 32 | schemas: 33 | eventPayload: 34 | type: object 35 | properties: 36 | id: 37 | type: integer 38 | minimum: 0 39 | description: Event id. 40 | name: 41 | type: string 42 | description: Event name. 43 | tag: 44 | type: string 45 | description: Tag. 46 | messages: 47 | event: 48 | name: Event 49 | title: Event 50 | summary: >- 51 | Inform about Event. 
52 | contentType: application/json 53 | payload: 54 | $ref: "#/components/schemas/eventPayload" 55 | -------------------------------------------------------------------------------- /grpc.echo/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7151" 8 | INPUT='{"message":"Hello World"}' 9 | EXPECTED='{ 10 | "message": "Hello World" 11 | }' 12 | echo \# Testing grpc.echo/example.EchoService.EchoUnary 13 | echo PORT="$PORT" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT example.EchoService.EchoUnary) 20 | RESULT=$? 21 | echo RESULT="$RESULT" 22 | # THEN 23 | echo OUTPUT="$OUTPUT" 24 | echo EXPECTED="$EXPECTED" 25 | echo 26 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 27 | echo ✅ 28 | else 29 | echo ❌ 30 | EXIT=1 31 | fi 32 | 33 | # GIVEN 34 | PORT="7151" 35 | INPUT='{"message":"Hello World"}' 36 | EXPECTED='{ 37 | "message": "Hello World" 38 | }' 39 | echo \# Testing grpc.echo/example.EchoService.EchoBidiStream 40 | echo PORT="$PORT" 41 | echo INPUT="$INPUT" 42 | echo EXPECTED="$EXPECTED" 43 | echo 44 | 45 | # WHEN 46 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT example.EchoService.EchoBidiStream) 47 | RESULT=$? 48 | echo RESULT="$RESULT" 49 | # THEN 50 | echo OUTPUT="$OUTPUT" 51 | echo EXPECTED="$EXPECTED" 52 | echo 53 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 54 | echo ✅ 55 | else 56 | echo ❌ 57 | EXIT=1 58 | fi 59 | 60 | exit $EXIT 61 | -------------------------------------------------------------------------------- /grpc.echo/README.md: -------------------------------------------------------------------------------- 1 | # grpc.echo 2 | 3 | Listens on tcp port `7151` and will echo grpc message sent by client. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - [grpcurl](https://github.com/fullstorydev/grpcurl) 9 | - [ghz](https://ghz.sh/docs/install) 10 | 11 | ## Setup 12 | 13 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 14 | 15 | ```bash 16 | docker compose up -d 17 | ``` 18 | 19 | ### Verify behavior 20 | 21 | #### Unary Stream 22 | 23 | Echo `{"message":"Hello World"}` message via unary rpc using `grpcurl` command. 24 | 25 | ```bash 26 | grpcurl -plaintext -proto ./etc/protos/echo.proto -d '{"message":"Hello World"}' \ 27 | localhost:7151 example.EchoService.EchoUnary 28 | ``` 29 | 30 | output: 31 | 32 | ```json 33 | { 34 | "message": "Hello World" 35 | } 36 | ``` 37 | 38 | #### Bidirectional Stream 39 | 40 | Echo messages via bidirectional streaming rpc. 41 | 42 | ```bash 43 | grpcurl -plaintext -proto ./etc/protos/echo.proto -d @ \ 44 | localhost:7151 example.EchoService.EchoBidiStream 45 | ``` 46 | 47 | Paste below message. 
48 | 49 | ```json 50 | { 51 | "message": "Hello World" 52 | } 53 | ``` 54 | 55 | ### Bench 56 | 57 | ```bash 58 | ghz --config bench.json \ 59 | --proto ./etc/protos/echo.proto \ 60 | --call example.EchoService/EchoBidiStream \ 61 | localhost:7151 62 | ``` 63 | 64 | ## Teardown 65 | 66 | To remove any resources created by the Docker Compose stack, use: 67 | 68 | ```bash 69 | docker compose down 70 | ``` 71 | -------------------------------------------------------------------------------- /grpc.echo/bench.json: -------------------------------------------------------------------------------- 1 | { 2 | "insecure": true, 3 | "stream-interval": "5ms", 4 | "total": 1000, 5 | "concurrency": 8, 6 | "data": 7 | { 8 | "message": "Hello, world" 9 | }, 10 | "metadata": 11 | { 12 | "trace_id": "{{.RequestNumber}}", 13 | "timestamp": "{{.TimestampUnix}}" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /grpc.echo/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-grpc-echo} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7151:7151 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7151"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | grpcurl: 19 | image: fullstorydev/grpcurl 20 | stdin_open: true 21 | tty: true 22 | profiles: 23 | - on-demand 24 | volumes: 25 | - ./etc/protos/echo.proto:/echo.proto 26 | 27 | networks: 28 | default: 29 | driver: bridge 30 | -------------------------------------------------------------------------------- /grpc.echo/etc/protos/echo.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package example; 4 | 5 | service EchoService 6 | { 7 | rpc EchoUnary(EchoMessage) returns (EchoMessage); 8 | 9 | rpc EchoBidiStream(stream EchoMessage) returns (stream EchoMessage); 10 | } 11 | 12 | message EchoMessage 13 | { 14 | string message = 1; 15 | } 16 | -------------------------------------------------------------------------------- /grpc.echo/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | name: example 2 | catalogs: 3 | host_filesystem: 4 | type: filesystem 5 | options: 6 | subjects: 7 | echo: 8 | path: protos/echo.proto 9 | bindings: 10 | north_tcp_server: 11 | type: tcp 12 | kind: server 13 | options: 14 | host: 0.0.0.0 15 | port: 16 | - 7151 17 | routes: 18 | - when: 19 | - port: 7151 20 | exit: north_http_server 21 | north_http_server: 22 | type: http 23 | kind: server 24 | options: 25 | versions: 26 | - h2 27 | access-control: 28 | policy: cross-origin 29 | exit: north_grpc_server 30 | north_grpc_server: 31 | type: grpc 32 | kind: server 33 | catalog: 34 | host_filesystem: 35 | - subject: echo 36 | routes: 37 | - when: 38 | - method: example.EchoService/* 39 | exit: north_echo_server 40 | north_echo_server: 41 | type: echo 42 | kind: server 43 | telemetry: 44 | exporters: 45 | stdout_logs_exporter: 46 | type: stdout 47 | -------------------------------------------------------------------------------- /grpc.kafka.echo/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7151" 8 | INPUT='{"message":"Hello 
World"}' 9 | EXPECTED='{ 10 | "message": "Hello World" 11 | }' 12 | echo \# Testing grpc.kafka.echo/UnaryEcho 13 | echo PORT="$PORT" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.UnaryEcho) 20 | RESULT=$? 21 | echo RESULT="$RESULT" 22 | 23 | # THEN 24 | echo OUTPUT="$OUTPUT" 25 | echo EXPECTED="$EXPECTED" 26 | echo 27 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 28 | echo ✅ 29 | else 30 | echo ❌ 31 | EXIT=1 32 | fi 33 | 34 | # GIVEN 35 | PORT="7151" 36 | INPUT='{"message":"Hello World"}' 37 | EXPECTED='{ 38 | "message": "Hello World" 39 | }' 40 | echo \# Testing grpc.kafka.echo/BidirectionalStreamingEcho 41 | echo PORT="$PORT" 42 | echo INPUT="$INPUT" 43 | echo EXPECTED="$EXPECTED" 44 | echo 45 | 46 | # WHEN 47 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.BidirectionalStreamingEcho) 48 | RESULT=$? 49 | echo RESULT="$RESULT" 50 | 51 | # THEN 52 | echo OUTPUT="$OUTPUT" 53 | echo EXPECTED="$EXPECTED" 54 | echo 55 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 56 | echo ✅ 57 | else 58 | echo ❌ 59 | EXIT=1 60 | fi 61 | 62 | exit $EXIT 63 | -------------------------------------------------------------------------------- /grpc.kafka.echo/bench.json: -------------------------------------------------------------------------------- 1 | { 2 | "insecure": true, 3 | "stream-interval": "5ms", 4 | "total": 1000, 5 | "concurrency": 8, 6 | "data": 7 | { 8 | "message": "Hello, world" 9 | }, 10 | "metadata": 11 | { 12 | "trace_id": "{{.RequestNumber}}", 13 | "timestamp": "{{.TimestampUnix}}" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /grpc.kafka.echo/etc/protos/echo.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option go_package = "google.golang.org/grpc/examples/features/proto/echo"; 3 | 4 | package grpc.examples.echo; 5 | 6 | // EchoRequest is the request for echo. 7 | message EchoRequest { 8 | string message = 1; 9 | } 10 | 11 | // EchoResponse is the response for echo. 12 | message EchoResponse { 13 | string message = 1; 14 | } 15 | 16 | // Echo is the echo service. 17 | service Echo { 18 | // UnaryEcho is unary echo. 19 | rpc UnaryEcho(EchoRequest) returns (EchoResponse) {} 20 | // ServerStreamingEcho is server side streaming. 21 | rpc ServerStreamingEcho(EchoRequest) returns (stream EchoResponse) {} 22 | // ClientStreamingEcho is client side streaming. 23 | rpc ClientStreamingEcho(stream EchoRequest) returns (EchoResponse) {} 24 | // BidirectionalStreamingEcho is bidi streaming. 
25 | rpc BidirectionalStreamingEcho(stream EchoRequest) returns (stream EchoResponse) {} 26 | } 27 | -------------------------------------------------------------------------------- /grpc.kafka.echo/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | name: example 2 | catalogs: 3 | host_filesystem: 4 | type: filesystem 5 | options: 6 | subjects: 7 | echo: 8 | path: protos/echo.proto 9 | bindings: 10 | north_tcp_server: 11 | type: tcp 12 | kind: server 13 | options: 14 | host: 0.0.0.0 15 | port: 16 | - 7151 17 | routes: 18 | - when: 19 | - port: 7151 20 | exit: north_http_server 21 | north_http_server: 22 | type: http 23 | kind: server 24 | options: 25 | versions: 26 | - h2 27 | access-control: 28 | policy: cross-origin 29 | exit: north_grpc_server 30 | north_grpc_server: 31 | type: grpc 32 | kind: server 33 | catalog: 34 | host_filesystem: 35 | - subject: echo 36 | routes: 37 | - when: 38 | - method: grpc.examples.echo.Echo/* 39 | exit: north_grpc_kafka_mapping 40 | north_grpc_kafka_mapping: 41 | type: grpc-kafka 42 | kind: proxy 43 | routes: 44 | - when: 45 | - method: grpc.examples.echo.Echo/* 46 | exit: north_kafka_cache_client 47 | with: 48 | capability: produce 49 | topic: echo-messages 50 | acks: leader_only 51 | reply-to: echo-messages 52 | north_kafka_cache_client: 53 | type: kafka 54 | kind: cache_client 55 | exit: south_kafka_cache_server 56 | south_kafka_cache_server: 57 | type: kafka 58 | kind: cache_server 59 | options: 60 | bootstrap: 61 | - echo-messages 62 | exit: south_kafka_client 63 | south_kafka_client: 64 | type: kafka 65 | kind: client 66 | options: 67 | servers: 68 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 69 | exit: south_tcp_client 70 | south_tcp_client: 71 | type: tcp 72 | kind: client 73 | telemetry: 74 | exporters: 75 | stdout_logs_exporter: 76 | type: stdout 77 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7151" 8 | 9 | EXPECTED='{ 10 | "message": "test" 11 | }' 12 | echo \# Testing grpc.kafka.fanout/example.FanoutService.FanoutServerStream 13 | echo PORT="$PORT" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | 20 | sleep 5 21 | 22 | (docker compose -p zilla-grpc-kafka-fanout exec kafkacat kafkacat -P -b kafka.examples.dev:29092 -t messages -k -e /tmp/binary.data) 23 | 24 | OUTPUT=$(echo "EXIT" | timeout 3s docker compose run --rm grpcurl -plaintext -proto fanout.proto -d '' zilla.examples.dev:$PORT example.FanoutService.FanoutServerStream) 25 | RESULT=$? 26 | echo RESULT="$RESULT" 27 | # THEN 28 | echo OUTPUT="$OUTPUT" 29 | echo EXPECTED="$EXPECTED" 30 | echo 31 | if [ "$OUTPUT" = "$EXPECTED" ]; then 32 | echo ✅ 33 | else 34 | echo ❌ 35 | EXIT=1 36 | fi 37 | 38 | exit $EXIT 39 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/.gitignore: -------------------------------------------------------------------------------- 1 | binary.data 2 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/README.md: -------------------------------------------------------------------------------- 1 | # grpc.kafka.fanout 2 | 3 | Listens on https port `7151` and fanout messages from `messages` topic in Kafka. 
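The walkthrough that follows produces a pre-encoded protobuf payload (`binary.data`, containing `message: "test"`) to the `messages` topic. As a hedged sketch that is not taken from the repository, a payload like it could be regenerated with `protoc --encode`, assuming the `example.FanoutMessage` type from `etc/protos/fanout.proto` and that the command is run from the `grpc.kafka.fanout` directory:

```bash
# Hedged sketch: encode the text-format message `message: "test"` into a binary
# protobuf payload comparable to binary.data, using the FanoutMessage type.
echo 'message: "test"' | \
  protoc --encode=example.FanoutMessage --proto_path=./etc/protos fanout.proto > binary.data
```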
4 | 5 | ## Requirements 6 | 7 | - protoc 8 | - docker compose 9 | - [grpcurl](https://github.com/fullstorydev/grpcurl) 10 | 11 | ## Setup 12 | 13 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 14 | 15 | ```bash 16 | docker compose up -d 17 | ``` 18 | 19 | ### Verify behavior 20 | 21 | #### Unreliable server streaming 22 | 23 | Produce a protobuf message to the Kafka topic; repeat to produce multiple messages. 24 | 25 | ```bash 26 | docker compose -p zilla-grpc-kafka-fanout exec kafkacat \ 27 | kafkacat -P -b kafka.examples.dev:29092 -t messages -k -e /tmp/binary.data 28 | ``` 29 | 30 | Stream messages via the server streaming rpc. 31 | 32 | ```bash 33 | grpcurl -plaintext -proto ./etc/protos/fanout.proto -d '' \ 34 | localhost:7151 example.FanoutService.FanoutServerStream 35 | ``` 36 | 37 | output: 38 | 39 | ```json 40 | { 41 | "message": "test" 42 | } 43 | ``` 44 | 45 | This output repeats for each message produced to Kafka. 46 | 47 | #### Reliable server streaming 48 | 49 | Build the reliable streaming client, which sends the last received message id (carried in protobuf field `32767`) as gRPC metadata so that streaming resumes from the last received message. 50 | 51 | ```bash 52 | cd grpc.reliable.streaming/ 53 | ./mvnw clean install 54 | cd .. 55 | ``` 56 | 57 | Connect with the reliable streaming client. 58 | 59 | ```bash 60 | java -jar grpc.reliable.streaming/target/grpc-example-develop-SNAPSHOT-jar-with-dependencies.jar 61 | ``` 62 | 63 | output: 64 | 65 | ```text 66 | ... 67 | INFO: Found message: message: "test" 68 | 32767: "\001\002\000\002" 69 | ``` 70 | 71 | Simulate connection loss by stopping the `zilla` service in the `docker` stack. 72 | 73 | ```bash 74 | docker compose -p zilla-grpc-kafka-fanout stop zilla 75 | ``` 76 | 77 | Simulate connection recovery by starting the `zilla` service again. 78 | 79 | ```bash 80 | docker compose -p zilla-grpc-kafka-fanout start zilla 81 | ``` 82 | 83 | Then produce another protobuf message to Kafka; repeat to produce multiple messages. 84 | 85 | ```bash 86 | docker compose -p zilla-grpc-kafka-fanout exec kafkacat \ 87 | kafkacat -P -b kafka.examples.dev:29092 -t messages -k -e /tmp/binary.data 88 | ``` 89 | 90 | The reliable streaming client recovers automatically, and Zilla delivers only the new messages. 91 | 92 | ```text 93 | ... 94 | INFO: Found message: message: "test" 95 | 32767: "\001\002\000\004" 96 | ``` 97 | 98 | This output repeats for each message produced to Kafka after the `zilla` service is restarted.
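Before tearing down, it can be useful to look at the raw records behind the stream. A minimal sketch, assuming the stack from this example is still running and reusing the `kafkacat` service, bootstrap address, and topic named above (payloads will show as raw protobuf bytes rather than decoded JSON):

```bash
# Consume the full messages topic and print each record as a JSON envelope
# (partition, offset, headers, payload); payloads are raw protobuf bytes.
docker compose -p zilla-grpc-kafka-fanout exec kafkacat \
  kafkacat -C -b kafka.examples.dev:29092 -t messages -J -u -e | jq .
```

Each record here corresponds to one `FanoutMessage` delivered on the server stream, and is what the last-message-id metadata lets the reliable client skip past after a reconnect.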
99 | 100 | ## Teardown 101 | 102 | To remove any resources created by the Docker Compose stack, use: 103 | 104 | ```bash 105 | docker compose down 106 | ``` 107 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/binary.data: -------------------------------------------------------------------------------- 1 | 2 | test -------------------------------------------------------------------------------- /grpc.kafka.fanout/etc/protos/fanout.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package example; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | service FanoutService 8 | { 9 | rpc FanoutServerStream(google.protobuf.Empty) returns (stream FanoutMessage); 10 | } 11 | 12 | message FanoutMessage 13 | { 14 | string message = 1; 15 | } 16 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | name: example 2 | catalogs: 3 | host_filesystem: 4 | type: filesystem 5 | options: 6 | subjects: 7 | fanout: 8 | path: protos/fanout.proto 9 | bindings: 10 | north_tcp_server: 11 | type: tcp 12 | kind: server 13 | options: 14 | host: 0.0.0.0 15 | port: 16 | - 7151 17 | routes: 18 | - when: 19 | - port: 7151 20 | exit: north_http_server 21 | north_http_server: 22 | type: http 23 | kind: server 24 | options: 25 | versions: 26 | - h2 27 | access-control: 28 | policy: cross-origin 29 | exit: north_grpc_server 30 | north_grpc_server: 31 | type: grpc 32 | kind: server 33 | catalog: 34 | host_filesystem: 35 | - subject: fanout 36 | routes: 37 | - when: 38 | - method: example.FanoutService/* 39 | exit: north_grpc_kafka_mapping 40 | north_grpc_kafka_mapping: 41 | type: grpc-kafka 42 | kind: proxy 43 | routes: 44 | - when: 45 | - method: example.FanoutService/FanoutServerStream 46 | exit: north_kafka_cache_client 47 | with: 48 | capability: fetch 49 | topic: messages 50 | north_kafka_cache_client: 51 | type: kafka 52 | kind: cache_client 53 | exit: south_kafka_cache_server 54 | south_kafka_cache_server: 55 | type: kafka 56 | kind: cache_server 57 | exit: south_kafka_client 58 | south_kafka_client: 59 | type: kafka 60 | kind: client 61 | options: 62 | servers: 63 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 64 | exit: south_tcp_client 65 | south_tcp_client: 66 | type: tcp 67 | kind: client 68 | telemetry: 69 | exporters: 70 | stdout_logs_exporter: 71 | type: stdout 72 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | /target 4 | /dist 5 | .idea 6 | 7 | # local env files 8 | .env.local 9 | .env.*.local 10 | 11 | # Log files 12 | npm-debug.log* 13 | yarn-debug.log* 14 | yarn-error.log* 15 | pnpm-debug.log* 16 | 17 | # Editor directories and files 18 | .idea 19 | .vscode 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 
25 | *.iml 26 | .mvn/timing.properties 27 | .mvn/wrapper/maven-wrapper.jar 28 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.4/apache-maven-3.8.4-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar 3 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/COPYRIGHT: -------------------------------------------------------------------------------- 1 | Copyright ${copyrightYears} Aklivity. All rights reserved. 2 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/src/main/java/io/aklivity/zilla/example/grpc/reliable/streaming/LastMessageIdInterceptor.java: -------------------------------------------------------------------------------- 1 | package io.aklivity.zilla.example.grpc.reliable.streaming; 2 | 3 | import java.nio.ByteBuffer; 4 | import java.util.logging.Level; 5 | import java.util.logging.Logger; 6 | 7 | import io.grpc.CallOptions; 8 | import io.grpc.Channel; 9 | import io.grpc.ClientCall; 10 | import io.grpc.ClientInterceptor; 11 | import io.grpc.ForwardingClientCall; 12 | import io.grpc.ForwardingClientCallListener; 13 | import io.grpc.Metadata; 14 | import io.grpc.MethodDescriptor; 15 | 16 | public class LastMessageIdInterceptor implements ClientInterceptor 17 | { 18 | private static final int LAST_MESSAGE_FIELD_ID = 32767; 19 | private static final Logger LOGGER = Logger.getLogger(LastMessageIdInterceptor.class.getName()); 20 | 21 | private byte[] lastMessageId; 22 | 23 | public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall( 24 | final MethodDescriptor<ReqT, RespT> methodDescriptor, 25 | final CallOptions callOptions, 26 | final Channel channel) 27 | { 28 | return new ForwardingClientCall.SimpleForwardingClientCall<>( 29 | channel.newCall(methodDescriptor, callOptions)) 30 | { 31 | 32 | @Override 33 | public void start(Listener<RespT> responseListener, Metadata headers) 34 | { 35 | if (lastMessageId != null) 36 | { 37 | headers.put(Metadata.Key.of("last-message-id-bin", Metadata.BINARY_BYTE_MARSHALLER), lastMessageId); 38 | } 39 | 40 | super.start( 41 | new ForwardingClientCallListener.SimpleForwardingClientCallListener<>( 42 | responseListener) 43 | { 44 | @Override 45 | public void onMessage(RespT message) 46 | { 47 | LOGGER.log(Level.FINE, "Received response from Server: {0}", message); 48 | ByteBuffer buffer = ((FanoutMessage) message).getUnknownFields() 49 | .getField(LAST_MESSAGE_FIELD_ID) 50 | .getLengthDelimitedList().get(0).asReadOnlyByteBuffer(); 51 | lastMessageId = new byte[buffer.capacity()]; 52 | buffer.get(lastMessageId, 0, buffer.capacity()); 53 | super.onMessage(message); 54 | } 55 | }, 56 | headers); 57 | } 58 | }; 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/src/main/proto/fanout.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package example; 4 | 5 | import "google/protobuf/empty.proto"; 6 | 7 | option java_multiple_files = true; 8 | option java_package = "io.aklivity.zilla.example.grpc.reliable.streaming"; 9 | option java_outer_classname =
"FanoutProto"; 10 | 11 | service FanoutService 12 | { 13 | rpc FanoutServerStream(google.protobuf.Empty) returns (stream FanoutMessage); 14 | } 15 | 16 | message FanoutMessage 17 | { 18 | string message = 1; 19 | } 20 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/src/main/resources/io/aklivity/zilla/example/grpc/reliable/streaming/certs/test-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFXjCCA0YCCQCuorYrG5wG+DANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV 3 | UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREwDwYD 4 | VQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMMB1Rl 5 | c3QgQ0EwHhcNMjExMjIxMjMwNDExWhcNMzExMjE5MjMwNDExWjBxMQswCQYDVQQG 6 | EwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREw 7 | DwYDVQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMM 8 | B1Rlc3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGPVgVO/zd 9 | ebwGWujKymJmztWZ5LIaZC+zY1SwKUBUA3+vrtO79ndi6WePiV0a2e7wov/ajFLp 10 | mor2RfGSMD8Yb9e98QSqnfy9Q5+ABmxFulgSJNwDjnxugZuk/6MILKMg7AsgqaxK 11 | wROSSLcom8b+gkbwXgHm57RKiitXlRM9ujdKibeHwfu7JTk6A7LwRbCVurTRqckw 12 | Q0/mA4mNuZ2AMGW+YL36TwTLfTAa4AVHEbI3U5+TyY3DoV7OoHI4Ec1/7B0CGzqK 13 | smKM3dKmXpRIc5NBZt+eKqphAhp0CD1eAnutWtepahjWyY1fAYk9hZ+ayU52dAMf 14 | +TbkPdMn5jfHhqs95VdfQjsKZPyNTYjhjHN9tAph1wKUG4XRATAvxhA2gpYgN9de 15 | 9ztWPboVzGosauQxPrXklO8CF7hsft0RlCCP9ojVLUkZ42vI/M1S3lD8pCDtPe46 16 | 2zQ9S3F1R7goF3AqWm4EQqu237+zL45pCbbWyyHeXHeDrv3DNXHcWXoFicNmCBl6 17 | nPPsVn9qgdhmJf5QcUKLkJEEtk94Uedv5qEqiJQYSPAIZHKnv4L5Li69kghTbUv/ 18 | Xquz2JdY5daj5eRurgZVjutkmMIaR4rJdhifBonlcKxoeSZoVbnoGzS5KcF9saz8 19 | 9qYU9LtF98CUMY7U4RPlVbA8D4YwDICgcwIDAQABMA0GCSqGSIb3DQEBCwUAA4IC 20 | AQDEzoEbCsHpae5M1I+vezg7w5NAItMU4WU9IccTylSB/gfIT+hWwIv9KiqTWjxw 21 | Y5Aj6XJ1mATHAMSQnNZCnP2Hw39/Nc3HcKmek2na2zK/TBSEFXudJmox8SK32r26 22 | nLstNlcYf7ixqJ5T7SOE2GJOcEUWpvTSbvQD0NvG81BVnSyUfX3FgkQLwwlyBoSE 23 | 7FwFz+ybrbisUHHqzPVnSblEDbKv6T9ai3FjbBegzPVSd9RmtB/DzxhdSk+kL1oD 24 | VSEPweSHEqamEnq2RIgLb7rYhmfohl0fGF5W6I3LvLqqe0KLRRID9V/jwBUGyICG 25 | W3jGu+68jOIUqXA4+gfOwXNktd4F7So48ySbghgrY0Umr4KSs6CTHhvSZ4ZG8QO/ 26 | ZyC+DjXsU3mihIBP/Q43YU7dYxFSdlCw79YnXvdWu7K7lZ1bIcbdH+RShcbvPcwg 27 | iM2qAvCgZBA8xHMDQeev8QdQjxtN+uBfee0mkvbzPbIh/0prywPHjAie/bXVBPVt 28 | VK6Gej2egPCIA5ThvGpmXh8kPd5Aqy1J++cmrzfYfPPsbmPGTLI0HFMhUuzIhFbd 29 | TzAV/Qj83r722s6f0v3KEEhfi3EZu3bRSGIyxVtebtOLGvEb2PjJrktyVJgivVFX 30 | uHHpz76QFOcLy1F962Hfj51NnIROOySyl12JkhPRTlMoiQ== 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /grpc.kafka.fanout/grpc.reliable.streaming/src/main/resources/io/aklivity/zilla/example/grpc/reliable/streaming/config/retrying_service_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "methodConfig": [ 3 | { 4 | "name": [ 5 | { 6 | "service": "example.FanoutService", 7 | "method": "FanoutServerStream" 8 | } 9 | ], 10 | 11 | "retryPolicy": { 12 | "maxAttempts": 5, 13 | "initialBackoff": "0.5s", 14 | "maxBackoff": "30s", 15 | "backoffMultiplier": 2, 16 | "retryableStatusCodes": [ 17 | "UNAVAILABLE" 18 | ] 19 | } 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /grpc.kafka.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7151" 8 | 
INPUT='{"message":"Hello World"}' 9 | EXPECTED='{ 10 | "message": "Hello World" 11 | }' 12 | echo \# Testing grpc.kafka.proxy/grpc.examples.echo.Echo.UnaryEcho 13 | echo PORT="$PORT" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.UnaryEcho) 20 | RESULT=$? 21 | echo RESULT="$RESULT" 22 | # THEN 23 | echo OUTPUT="$OUTPUT" 24 | echo EXPECTED="$EXPECTED" 25 | echo 26 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 27 | echo ✅ 28 | else 29 | echo ❌ 30 | EXIT=1 31 | fi 32 | 33 | # GIVEN 34 | PORT="7151" 35 | INPUT='{"message":"Hello World"}' 36 | EXPECTED='{ 37 | "message": "Hello World" 38 | } 39 | { 40 | "message": "Hello World" 41 | } 42 | { 43 | "message": "Hello World" 44 | } 45 | { 46 | "message": "Hello World" 47 | } 48 | { 49 | "message": "Hello World" 50 | } 51 | { 52 | "message": "Hello World" 53 | } 54 | { 55 | "message": "Hello World" 56 | } 57 | { 58 | "message": "Hello World" 59 | } 60 | { 61 | "message": "Hello World" 62 | } 63 | { 64 | "message": "Hello World" 65 | }' 66 | echo \# Testing grpc.kafka.proxy/grpc.examples.echo.Echo.ServerStreamingEcho 67 | echo PORT="$PORT" 68 | echo INPUT="$INPUT" 69 | echo EXPECTED="$EXPECTED" 70 | echo 71 | 72 | # WHEN 73 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.ServerStreamingEcho) 74 | RESULT=$? 75 | echo RESULT="$RESULT" 76 | # THEN 77 | echo OUTPUT="$OUTPUT" 78 | echo EXPECTED="$EXPECTED" 79 | echo 80 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 81 | echo ✅ 82 | else 83 | echo ❌ 84 | EXIT=1 85 | fi 86 | 87 | 88 | # GIVEN 89 | PORT="7151" 90 | INPUT='{"message":"Hello World"}' 91 | EXPECTED='{ 92 | "message": "Hello World" 93 | }' 94 | echo \# Testing grpc.kafka.proxy/grpc.examples.echo.Echo.BidirectionalStreamingEcho 95 | echo PORT="$PORT" 96 | echo INPUT="$INPUT" 97 | echo EXPECTED="$EXPECTED" 98 | echo 99 | 100 | # WHEN 101 | OUTPUT=$(docker compose run --rm grpcurl -plaintext -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.BidirectionalStreamingEcho) 102 | RESULT=$? 103 | echo RESULT="$RESULT" 104 | # THEN 105 | echo OUTPUT="$OUTPUT" 106 | echo EXPECTED="$EXPECTED" 107 | echo 108 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 109 | echo ✅ 110 | else 111 | echo ❌ 112 | EXIT=1 113 | fi 114 | 115 | exit $EXIT 116 | -------------------------------------------------------------------------------- /grpc.kafka.proxy/etc/protos/echo.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option go_package = "google.golang.org/grpc/examples/features/proto/echo"; 3 | 4 | package grpc.examples.echo; 5 | 6 | // EchoRequest is the request for echo. 7 | message EchoRequest { 8 | string message = 1; 9 | } 10 | 11 | // EchoResponse is the response for echo. 12 | message EchoResponse { 13 | string message = 1; 14 | } 15 | 16 | // Echo is the echo service. 17 | service Echo { 18 | // UnaryEcho is unary echo. 19 | rpc UnaryEcho(EchoRequest) returns (EchoResponse) {} 20 | // ServerStreamingEcho is server side streaming. 21 | rpc ServerStreamingEcho(EchoRequest) returns (stream EchoResponse) {} 22 | // ClientStreamingEcho is client side streaming. 
23 | rpc ClientStreamingEcho(stream EchoRequest) returns (EchoResponse) {} 24 | // BidirectionalStreamingEcho is bidi streaming. 25 | rpc BidirectionalStreamingEcho(stream EchoRequest) returns (stream EchoResponse) {} 26 | } 27 | -------------------------------------------------------------------------------- /grpc.kafka.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | echo: 9 | path: protos/echo.proto 10 | bindings: 11 | north_tcp_server: 12 | type: tcp 13 | kind: server 14 | options: 15 | host: 0.0.0.0 16 | port: 17 | - 7151 18 | routes: 19 | - when: 20 | - port: 7151 21 | exit: north_http_server 22 | north_http_server: 23 | type: http 24 | kind: server 25 | options: 26 | versions: 27 | - h2 28 | access-control: 29 | policy: cross-origin 30 | exit: north_grpc_server 31 | north_grpc_server: 32 | type: grpc 33 | kind: server 34 | catalog: 35 | host_filesystem: 36 | - subject: echo 37 | routes: 38 | - when: 39 | - method: grpc.examples.echo.Echo/* 40 | exit: north_grpc_kafka_mapping 41 | north_grpc_kafka_mapping: 42 | type: grpc-kafka 43 | kind: proxy 44 | routes: 45 | - when: 46 | - method: grpc.examples.echo.Echo/* 47 | exit: north_kafka_cache_client 48 | with: 49 | capability: produce 50 | topic: echo-requests 51 | acks: leader_only 52 | reply-to: echo-responses 53 | north_kafka_cache_client: 54 | type: kafka 55 | kind: cache_client 56 | exit: south_kafka_cache_server 57 | south_kafka_cache_server: 58 | type: kafka 59 | kind: cache_server 60 | exit: south_kafka_client 61 | south_kafka_client: 62 | type: kafka 63 | kind: client 64 | options: 65 | servers: 66 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 67 | exit: south_tcp_client 68 | south_tcp_client: 69 | type: tcp 70 | kind: client 71 | west_kafka_grpc_remote_server: 72 | type: kafka-grpc 73 | kind: remote_server 74 | entry: north_kafka_cache_client 75 | options: 76 | acks: leader_only 77 | routes: 78 | - exit: west_grpc_client 79 | when: 80 | - topic: echo-requests 81 | reply-to: echo-responses 82 | method: grpc.examples.echo.Echo/* 83 | with: 84 | scheme: http 85 | authority: ${{env.ECHO_SERVER_HOST}}:${{env.ECHO_SERVER_PORT}} 86 | west_grpc_client: 87 | type: grpc 88 | kind: client 89 | routes: 90 | - exit: west_http_client 91 | when: 92 | - method: grpc.examples.echo.Echo/* 93 | west_http_client: 94 | type: http 95 | kind: client 96 | options: 97 | versions: 98 | - h2 99 | exit: west_tcp_client 100 | west_tcp_client: 101 | type: tcp 102 | kind: client 103 | options: 104 | host: ${{env.ECHO_SERVER_HOST}} 105 | port: ${{env.ECHO_SERVER_PORT}} 106 | telemetry: 107 | exporters: 108 | stdout_logs_exporter: 109 | type: stdout 110 | -------------------------------------------------------------------------------- /grpc.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7153" 8 | INPUT='{"message":"Hello World"}' 9 | EXPECTED='{ 10 | "message": "Hello World" 11 | }' 12 | echo \# Testing grpc.kafka.proxy/grpc.examples.echo.Echo.UnaryEcho 13 | echo PORT="$PORT" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | OUTPUT=$(docker compose run --rm grpcurl -insecure -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.UnaryEcho) 20 | RESULT=$? 
21 | echo RESULT="$RESULT" 22 | # THEN 23 | echo OUTPUT="$OUTPUT" 24 | echo EXPECTED="$EXPECTED" 25 | echo 26 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 27 | echo ✅ 28 | else 29 | echo ❌ 30 | EXIT=1 31 | fi 32 | 33 | # GIVEN 34 | PORT="7153" 35 | INPUT='{"message":"Hello World"}' 36 | EXPECTED='{ 37 | "message": "Hello World" 38 | } 39 | { 40 | "message": "Hello World" 41 | } 42 | { 43 | "message": "Hello World" 44 | } 45 | { 46 | "message": "Hello World" 47 | } 48 | { 49 | "message": "Hello World" 50 | } 51 | { 52 | "message": "Hello World" 53 | } 54 | { 55 | "message": "Hello World" 56 | } 57 | { 58 | "message": "Hello World" 59 | } 60 | { 61 | "message": "Hello World" 62 | } 63 | { 64 | "message": "Hello World" 65 | }' 66 | echo \# Testing grpc.kafka.proxy/grpc.examples.echo.Echo.ServerStreamingEcho 67 | echo PORT="$PORT" 68 | echo INPUT="$INPUT" 69 | echo EXPECTED="$EXPECTED" 70 | echo 71 | 72 | # WHEN 73 | OUTPUT=$(docker compose run --rm grpcurl -insecure -proto echo.proto -d "$INPUT" zilla.examples.dev:$PORT grpc.examples.echo.Echo.ServerStreamingEcho) 74 | RESULT=$? 75 | echo RESULT="$RESULT" 76 | # THEN 77 | echo OUTPUT="$OUTPUT" 78 | echo EXPECTED="$EXPECTED" 79 | echo 80 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 81 | echo ✅ 82 | else 83 | echo ❌ 84 | EXIT=1 85 | fi 86 | 87 | exit $EXIT 88 | -------------------------------------------------------------------------------- /grpc.proxy/README.md: -------------------------------------------------------------------------------- 1 | # grpc.proxy 2 | 3 | Listens on https port `7151` and will echo back whatever is published to `grpc-echo` on tcp port `50051`. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - [grpcurl](https://github.com/fullstorydev/grpcurl) 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | #### Unary Stream 21 | 22 | Echo `{"message":"Hello World"}` message via unary rpc. 23 | 24 | ```bash 25 | grpcurl -insecure -proto ./etc/protos/echo.proto -d '{"message":"Hello World"}' \ 26 | localhost:7153 grpc.examples.echo.Echo.UnaryEcho 27 | ``` 28 | 29 | output: 30 | 31 | ```json 32 | { 33 | "message": "Hello World" 34 | } 35 | ``` 36 | 37 | #### Bidirectional streaming 38 | 39 | Echo messages via bidirectional streaming rpc. 40 | 41 | ```bash 42 | grpcurl -insecure -proto ./etc/protos/echo.proto -d @ \ 43 | localhost:7153 grpc.examples.echo.Echo.BidirectionalStreamingEcho 44 | ``` 45 | 46 | Paste below message. 
47 | 48 | ```json 49 | { 50 | "message": "Hello World" 51 | } 52 | ``` 53 | 54 | ## Teardown 55 | 56 | To remove any resources created by the Docker Compose stack, use: 57 | 58 | ```bash 59 | docker compose down 60 | ``` 61 | -------------------------------------------------------------------------------- /grpc.proxy/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-grpc-proxy} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7151:7151 9 | - 7153:7153 10 | healthcheck: 11 | interval: 5s 12 | timeout: 3s 13 | retries: 5 14 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7153"] 15 | environment: 16 | KEYSTORE_PASSWORD: generated 17 | ECHO_SERVER_HOST: grpc-echo 18 | ECHO_SERVER_PORT: 50051 19 | volumes: 20 | - ./etc:/etc/zilla 21 | command: start -v -e 22 | 23 | grpc-echo: 24 | image: ghcr.io/aklivity/extras-grpc-echo-server:sha-478b2ce 25 | command: ["--port", "50051"] 26 | ports: 27 | - 50051:50051 28 | healthcheck: 29 | test: ["CMD", "nc", "-z", "127.0.0.1", "50051"] 30 | interval: 10s 31 | timeout: 5s 32 | retries: 5 33 | 34 | grpcurl: 35 | image: fullstorydev/grpcurl 36 | stdin_open: true 37 | tty: true 38 | profiles: 39 | - on-demand 40 | volumes: 41 | - ./etc/protos/echo.proto:/echo.proto 42 | 43 | networks: 44 | default: 45 | driver: bridge 46 | -------------------------------------------------------------------------------- /grpc.proxy/etc/protos/echo.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option go_package = "google.golang.org/grpc/examples/features/proto/echo"; 3 | 4 | package grpc.examples.echo; 5 | 6 | // EchoRequest is the request for echo. 7 | message EchoRequest { 8 | string message = 1; 9 | } 10 | 11 | // EchoResponse is the response for echo. 12 | message EchoResponse { 13 | string message = 1; 14 | } 15 | 16 | // Echo is the echo service. 17 | service Echo { 18 | // UnaryEcho is unary echo. 19 | rpc UnaryEcho(EchoRequest) returns (EchoResponse) {} 20 | // ServerStreamingEcho is server side streaming. 21 | rpc ServerStreamingEcho(EchoRequest) returns (stream EchoResponse) {} 22 | // ClientStreamingEcho is client side streaming. 23 | rpc ClientStreamingEcho(stream EchoRequest) returns (EchoResponse) {} 24 | // BidirectionalStreamingEcho is bidi streaming. 
25 | rpc BidirectionalStreamingEcho(stream EchoRequest) returns (stream EchoResponse) {} 26 | } 27 | -------------------------------------------------------------------------------- /grpc.proxy/etc/tls/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/grpc.proxy/etc/tls/localhost.p12 -------------------------------------------------------------------------------- /grpc.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | echo: 9 | path: protos/echo.proto 10 | vaults: 11 | my_servers: 12 | type: filesystem 13 | options: 14 | keys: 15 | store: tls/localhost.p12 16 | type: pkcs12 17 | password: ${{env.KEYSTORE_PASSWORD}} 18 | bindings: 19 | north_tcp_server: 20 | type: tcp 21 | kind: server 22 | options: 23 | host: 0.0.0.0 24 | port: 25 | - 7151 26 | - 7153 27 | routes: 28 | - when: 29 | - port: 7151 30 | exit: north_http_server 31 | - when: 32 | - port: 7153 33 | exit: north_tls_server 34 | north_tls_server: 35 | type: tls 36 | kind: server 37 | vault: my_servers 38 | options: 39 | keys: 40 | - localhost 41 | sni: 42 | - localhost 43 | alpn: 44 | - h2 45 | exit: north_http_server 46 | north_http_server: 47 | type: http 48 | kind: server 49 | options: 50 | versions: 51 | - h2 52 | access-control: 53 | policy: cross-origin 54 | exit: north_grpc_server 55 | north_grpc_server: 56 | type: grpc 57 | kind: server 58 | catalog: 59 | host_filesystem: 60 | - subject: echo 61 | routes: 62 | - when: 63 | - method: grpc.examples.echo.Echo/* 64 | exit: south_grpc_client 65 | south_grpc_client: 66 | type: grpc 67 | kind: client 68 | exit: south_http_client 69 | south_http_client: 70 | type: http 71 | kind: client 72 | options: 73 | versions: 74 | - h2 75 | exit: south_tcp_client 76 | south_tcp_client: 77 | type: tcp 78 | kind: client 79 | options: 80 | host: ${{env.ECHO_SERVER_HOST}} 81 | port: ${{env.ECHO_SERVER_PORT}} 82 | telemetry: 83 | exporters: 84 | stdout_logs_exporter: 85 | type: stdout 86 | -------------------------------------------------------------------------------- /http.filesystem/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | EXPECTED=' 9 | 10 | Welcome to Zilla! 11 | 12 | 13 |
Welcome to Zilla!
14 | 15 | ' 16 | echo \# Testing http.filesystem/ 17 | echo PORT="$PORT" 18 | echo EXPECTED="$EXPECTED" 19 | echo 20 | 21 | # WHEN 22 | OUTPUT=$(curl http://localhost:$PORT/index.html) 23 | RESULT=$? 24 | echo RESULT="$RESULT" 25 | 26 | # THEN 27 | echo OUTPUT="$OUTPUT" 28 | echo EXPECTED="$EXPECTED" 29 | echo 30 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 31 | echo ✅ 32 | else 33 | echo ❌ 34 | EXIT=1 35 | fi 36 | 37 | exit $EXIT 38 | -------------------------------------------------------------------------------- /http.filesystem/README.md: -------------------------------------------------------------------------------- 1 | # http.filesystem 2 | 3 | Listens on http port `7114` and serves files from the Zilla container's `/var/www` subdirectory. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | 9 | ## Setup 10 | 11 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 12 | 13 | ```bash 14 | docker compose up -d 15 | ``` 16 | 17 | ### Verify behavior 18 | 19 | ```bash 20 | curl http://localhost:7114/index.html 21 | ``` 22 | 23 | output: 24 | 25 | ```html 26 | 27 | 28 | Welcome to Zilla! 29 | 30 | 31 |
Welcome to Zilla!
32 | 33 | 34 | ``` 35 | 36 | ## Teardown 37 | 38 | To remove any resources created by the Docker Compose stack, use: 39 | 40 | ```bash 41 | docker compose down 42 | ``` 43 | -------------------------------------------------------------------------------- /http.filesystem/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-filesystem} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | - ./www:/var/www 17 | command: start -v -e 18 | 19 | networks: 20 | default: 21 | driver: bridge 22 | -------------------------------------------------------------------------------- /http.filesystem/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | :scheme: http 22 | exit: east_http_filesystem_mapping 23 | east_http_filesystem_mapping: 24 | type: http-filesystem 25 | kind: proxy 26 | routes: 27 | - when: 28 | - path: /{path} 29 | exit: east_filesystem_server 30 | with: 31 | path: ${params.path} 32 | east_filesystem_server: 33 | type: filesystem 34 | kind: server 35 | options: 36 | location: /var/www/ 37 | telemetry: 38 | exporters: 39 | stdout_logs_exporter: 40 | type: stdout 41 | -------------------------------------------------------------------------------- /http.filesystem/www/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Welcome to Zilla! 4 | 5 | 6 |
Welcome to Zilla!
7 | 8 | 9 | -------------------------------------------------------------------------------- /http.json.schema/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | EXPECTED='{ 9 | "id": 42, 10 | "status": "Active" 11 | }' 12 | echo \# Testing http.json.schema/valid.json 13 | echo PORT="$PORT" 14 | echo EXPECTED="$EXPECTED" 15 | echo 16 | 17 | # WHEN 18 | OUTPUT=$(curl http://localhost:$PORT/valid.json) 19 | RESULT=$? 20 | echo RESULT="$RESULT" 21 | 22 | # THEN 23 | echo OUTPUT="$OUTPUT" 24 | echo EXPECTED="$EXPECTED" 25 | echo 26 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 27 | echo ✅ 28 | else 29 | echo ❌ 30 | EXIT=1 31 | fi 32 | 33 | 34 | # GIVEN 35 | PORT="7114" 36 | echo \# Testing http.json.schema/invalid.json 37 | echo PORT="$PORT" 38 | 39 | echo 40 | 41 | # WHEN 42 | OUTPUT=$(curl http://localhost:$PORT/invalid.json) 43 | RESULT=$? 44 | echo RESULT="$RESULT" 45 | 46 | # THEN 47 | echo 48 | if [ "$RESULT" -eq 18 ]; then 49 | echo ✅ 50 | else 51 | echo ❌ 52 | EXIT=1 53 | fi 54 | 55 | exit $EXIT 56 | -------------------------------------------------------------------------------- /http.json.schema/README.md: -------------------------------------------------------------------------------- 1 | # http.json.schema 2 | 3 | Listens on https port `7114` and will response back whatever is hosted in `nginx` on that path after enforcing validation. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | 9 | ## Setup 10 | 11 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 12 | 13 | ```bash 14 | docker compose up -d 15 | ``` 16 | 17 | ### Verify behavior for valid content 18 | 19 | ```bash 20 | curl http://localhost:7114/valid.json 21 | ``` 22 | 23 | output: 24 | 25 | ```text 26 | { 27 | "id": 42, 28 | "status": "Active" 29 | } 30 | ``` 31 | 32 | ### Verify behavior for invalid content 33 | 34 | ```bash 35 | curl http://localhost:7114/invalid.json 36 | ``` 37 | 38 | output: 39 | 40 | ```text 41 | curl: (18) transfer closed with 37 bytes remaining to read 42 | ``` 43 | 44 | ## Teardown 45 | 46 | To remove any resources created by the Docker Compose stack, use: 47 | 48 | ```bash 49 | docker compose down 50 | ``` 51 | -------------------------------------------------------------------------------- /http.json.schema/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-json-schema} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | nginx: 19 | image: nginx:stable 20 | restart: unless-stopped 21 | ports: 22 | - 80:80 23 | volumes: 24 | - ./nginx.conf:/etc/nginx/nginx.conf 25 | - ./www:/usr/share/nginx/html 26 | 27 | networks: 28 | default: 29 | driver: bridge 30 | -------------------------------------------------------------------------------- /http.json.schema/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 1; 3 | 4 | error_log /var/log/nginx/error.log warn; 5 | pid /var/run/nginx.pid; 6 | 7 | events { 8 | worker_connections 1024; 9 | } 10 
| 11 | http { 12 | include /etc/nginx/mime.types; 13 | default_type application/octet-stream; 14 | 15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 16 | '$status $body_bytes_sent "$http_referer" ' 17 | '"$http_user_agent" "$http_x_forwarded_for"'; 18 | 19 | access_log /var/log/nginx/access.log main; 20 | 21 | sendfile on; 22 | 23 | keepalive_timeout 65; 24 | 25 | server { 26 | listen 80; 27 | listen [::]:80; 28 | 29 | root /usr/share/nginx/html; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /http.json.schema/www/demo.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | This is the title of the webpage! 5 | 6 | 7 | 8 |
This is an example paragraph. Anything in the body tag will appear on the page, just like this p tag and its contents.
9 | 10 | 11 | -------------------------------------------------------------------------------- /http.json.schema/www/invalid.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": 42, 3 | "status": "my status", 4 | "name": "test" 5 | } 6 | -------------------------------------------------------------------------------- /http.json.schema/www/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color: powderblue; 3 | } 4 | h1 { 5 | color: blue; 6 | } 7 | p { 8 | color: red; 9 | } 10 | -------------------------------------------------------------------------------- /http.json.schema/www/valid.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": 42, 3 | "status": "Active" 4 | } 5 | -------------------------------------------------------------------------------- /http.kafka.async/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | KAFKA_BOOTSTRAP_SERVER="kafka.examples.dev:29092" 9 | ITEM_ID="$(date +%s)" 10 | GREETING="Hello, World! $ITEM_ID" 11 | GREETING_DATE="Hello, World! $(date)" 12 | EXPECTED="{\"greeting\":\"$GREETING_DATE\"}" 13 | 14 | echo \# Testing http.kafka.async/ 15 | echo PORT="$PORT" 16 | echo KAFKA_BOOTSTRAP_SERVER="$KAFKA_BOOTSTRAP_SERVER" 17 | echo ITEM_ID="$ITEM_ID" 18 | echo GREETING="$GREETING" 19 | echo GREETING_DATE="$GREETING_DATE" 20 | echo 21 | 22 | # WHEN 23 | # send request to zilla 24 | timeout 60s curl \ 25 | -X "PUT" http://localhost:$PORT/items/$ITEM_ID \ 26 | -H "Idempotency-Key: $ITEM_ID" \ 27 | -H "Content-Type: application/json" \ 28 | -d "{\"greeting\":\"$GREETING\"}" | tee .testoutput & 29 | 30 | # fetch correlation id from kafka with kafkacat; try 5 times 31 | for i in $(seq 0 5); do 32 | sleep $i 33 | CORRELATION_ID=$(docker compose -p zilla-http-kafka-async exec kafkacat kafkacat -C -c 1 -o-1 -b $KAFKA_BOOTSTRAP_SERVER -t items-requests -J -u | jq -r '.headers | index("zilla:correlation-id") as $index | .[$index + 1]') 34 | if [ -n "$CORRELATION_ID" ]; then 35 | break 36 | fi 37 | done 38 | echo CORRELATION_ID="$CORRELATION_ID" 39 | if [ -z "$CORRELATION_ID" ]; then 40 | echo ❌ 41 | EXIT=1 42 | fi 43 | 44 | # push response to kafka with kafkacat 45 | echo "{\"greeting\":\"$GREETING_DATE\"}" | 46 | docker compose -p zilla-http-kafka-async exec -T kafkacat \ 47 | kafkacat -P \ 48 | -b $KAFKA_BOOTSTRAP_SERVER \ 49 | -t items-responses \ 50 | -k "$ITEM_ID" \ 51 | -H ":status=200" \ 52 | -H "zilla:correlation-id=$CORRELATION_ID" 53 | 54 | # fetch the output of zilla request; try 5 times 55 | for i in $(seq 0 5); do 56 | sleep $i 57 | OUTPUT=$(cat .testoutput) 58 | if [ -n "$OUTPUT" ]; then 59 | break 60 | fi 61 | done 62 | rm .testoutput 63 | 64 | # THEN 65 | echo OUTPUT="$OUTPUT" 66 | echo EXPECTED="$EXPECTED" 67 | echo 68 | if [ "$OUTPUT" = "$EXPECTED" ]; then 69 | echo ✅ 70 | else 71 | echo ❌ 72 | EXIT=1 73 | fi 74 | 75 | exit $EXIT 76 | -------------------------------------------------------------------------------- /http.kafka.async/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | 
type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | :scheme: http 22 | exit: north_http_kafka_mapping 23 | north_http_kafka_mapping: 24 | type: http-kafka 25 | kind: proxy 26 | routes: 27 | - when: 28 | - method: PUT 29 | path: /items/{id} 30 | - method: GET 31 | path: /items/{id};cid={correlationId} 32 | exit: north_kafka_cache_client 33 | with: 34 | capability: produce 35 | topic: items-requests 36 | key: ${params.id} 37 | reply-to: items-responses 38 | async: 39 | location: /items/${params.id};cid=${correlationId} 40 | north_kafka_cache_client: 41 | type: kafka 42 | kind: cache_client 43 | exit: south_kafka_cache_server 44 | south_kafka_cache_server: 45 | type: kafka 46 | kind: cache_server 47 | options: 48 | bootstrap: 49 | - items-responses 50 | exit: south_kafka_client 51 | south_kafka_client: 52 | type: kafka 53 | kind: client 54 | options: 55 | servers: 56 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 57 | exit: south_tcp_client 58 | south_tcp_client: 59 | type: tcp 60 | kind: client 61 | telemetry: 62 | exporters: 63 | stdout_logs_exporter: 64 | type: stdout 65 | -------------------------------------------------------------------------------- /http.kafka.avro.json/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # create schema 7 | for i in $(seq 1 5); do 8 | RESPONSE=$(curl -s --header "Content-Type: application/json" --data '{ 9 | "schema": 10 | "{\"fields\":[{\"name\":\"id\",\"type\":\"string\"},{\"name\":\"status\",\"type\":\"string\"}],\"name\":\"Event\",\"namespace\":\"io.aklivity.example\",\"type\":\"record\"}", 11 | "schemaType": "AVRO" 12 | }' "http://localhost:8081/subjects/items-snapshots-value/versions") 13 | 14 | if [ "$RESPONSE" = '{"id":1}' ]; then 15 | break 16 | fi 17 | 18 | sleep 2 19 | done 20 | 21 | # GIVEN 22 | PORT="7114" 23 | INPUT='{"id": "123", "status": "OK"}' 24 | EXPECTED='{"id":"123","status":"OK"}' 25 | echo \# Testing http.kafka.avro.json/valid 26 | echo PORT="$PORT" 27 | echo INPUT="$INPUT" 28 | echo EXPECTED="$EXPECTED" 29 | echo 30 | 31 | # send message 32 | curl -k http://localhost:7114/items -H 'Idempotency-Key: 1' -H 'Content-Type: application/json' -d "$INPUT" 33 | 34 | # WHEN 35 | OUTPUT=$(curl -k http://localhost:$PORT/items/1) 36 | RESULT=$? 37 | echo RESULT="$RESULT" 38 | 39 | # THEN 40 | echo OUTPUT="$OUTPUT" 41 | echo EXPECTED="$EXPECTED" 42 | echo 43 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 44 | echo ✅ 45 | else 46 | echo ❌ 47 | EXIT=1 48 | fi 49 | 50 | # GIVEN 51 | PORT="7114" 52 | INPUT='{"id": 123,"status": "OK"}' 53 | EXPECTED='404' 54 | echo \# Testing http.kafka.avro.json/invalid 55 | echo PORT="$PORT" 56 | echo INPUT="$INPUT" 57 | echo EXPECTED="$EXPECTED" 58 | echo 59 | 60 | # send message 61 | curl -k http://localhost:7114/items -H 'Idempotency-Key: 2' -H 'Content-Type: application/json' -d "$INPUT" 62 | 63 | # WHEN 64 | OUTPUT=$(curl -w "%{http_code}" http://localhost:$PORT/items/2) 65 | RESULT=$? 
66 | echo RESULT="$RESULT" 67 | 68 | # THEN 69 | echo OUTPUT="$OUTPUT" 70 | echo EXPECTED="$EXPECTED" 71 | echo 72 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 73 | echo ✅ 74 | else 75 | echo ❌ 76 | EXIT=1 77 | fi 78 | 79 | exit $EXIT 80 | -------------------------------------------------------------------------------- /http.kafka.cache/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | EXPECTED="[]" 9 | echo \# Testing http.kafka.cache/ 10 | echo PORT="$PORT" 11 | echo INPUT="$INPUT" 12 | echo EXPECTED="$EXPECTED" 13 | echo 14 | 15 | # WHEN 16 | OUTPUT=$(curl http://localhost:$PORT/items) 17 | RESULT=$? 18 | echo RESULT="$RESULT" 19 | 20 | # THEN 21 | echo OUTPUT="$OUTPUT" 22 | echo EXPECTED="$EXPECTED" 23 | echo 24 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 25 | echo ✅ 26 | else 27 | echo ❌ 28 | EXIT=1 29 | fi 30 | 31 | 32 | # GIVEN 33 | PORT="7114" 34 | INPUT='{"message":"Hello World"}' 35 | EXPECTED='[{"message":"Hello World"}]' 36 | echo \# Testing http.kafka.cache/ 37 | echo PORT="$PORT" 38 | echo INPUT="$INPUT" 39 | echo EXPECTED="$EXPECTED" 40 | echo 41 | 42 | echo "$INPUT" | docker compose -p zilla-http-kafka-cache exec -T kafkacat \ 43 | kafkacat -P \ 44 | -b kafka.examples.dev:29092 \ 45 | -t items-snapshots \ 46 | -k "5cf7a1d5-3772-49ef-86e7-ba6f2c7d7d07" \ 47 | -H "content-type=application/json" 48 | 49 | # WHEN 50 | OUTPUT=$(curl http://localhost:$PORT/items) 51 | RESULT=$? 52 | echo RESULT="$RESULT" 53 | 54 | # THEN 55 | echo OUTPUT="$OUTPUT" 56 | echo EXPECTED="$EXPECTED" 57 | echo 58 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 59 | echo ✅ 60 | else 61 | echo ❌ 62 | EXIT=1 63 | fi 64 | 65 | exit $EXIT 66 | -------------------------------------------------------------------------------- /http.kafka.cache/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-kafka-cache} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | environment: 15 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | kafka: 21 | image: bitnami/kafka:3.5 22 | restart: unless-stopped 23 | hostname: kafka.examples.dev 24 | ports: 25 | - 9092:9092 26 | healthcheck: 27 | test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka.examples.dev:29092 || exit 1 28 | interval: 1s 29 | timeout: 60s 30 | retries: 60 31 | environment: 32 | ALLOW_PLAINTEXT_LISTENER: "yes" 33 | KAFKA_CFG_NODE_ID: "1" 34 | KAFKA_CFG_BROKER_ID: "1" 35 | KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0" 36 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093" 37 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" 38 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" 39 | KAFKA_CFG_LOG_DIRS: "/tmp/logs" 40 | KAFKA_CFG_PROCESS_ROLES: "broker,controller" 41 | KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093" 42 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL" 43 | KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka.examples.dev:29092" 44 | 
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" 45 | 46 | kafka-init: 47 | image: bitnami/kafka:3.5 48 | user: root 49 | depends_on: 50 | kafka: 51 | condition: service_healthy 52 | restart: true 53 | deploy: 54 | restart_policy: 55 | condition: none 56 | max_attempts: 0 57 | entrypoint: ["/bin/sh", "-c"] 58 | environment: 59 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 60 | command: 61 | - | 62 | echo -e "Creating kafka topic"; 63 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --create --if-not-exists --topic items-snapshots --config cleanup.policy=compact 64 | echo -e "Successfully created the following topics:"; 65 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --list; 66 | 67 | kafka-ui: 68 | image: ghcr.io/kafbat/kafka-ui:v1.0.0 69 | restart: unless-stopped 70 | ports: 71 | - 8080:8080 72 | depends_on: 73 | kafka: 74 | condition: service_healthy 75 | restart: true 76 | environment: 77 | KAFKA_CLUSTERS_0_NAME: local 78 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka.examples.dev:29092 79 | 80 | kafkacat: 81 | image: confluentinc/cp-kafkacat:7.1.9 82 | command: "bash" 83 | stdin_open: true 84 | tty: true 85 | 86 | networks: 87 | default: 88 | driver: bridge 89 | -------------------------------------------------------------------------------- /http.kafka.cache/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | :scheme: http 22 | exit: north_http_kafka_mapping 23 | north_http_kafka_mapping: 24 | type: http-kafka 25 | kind: proxy 26 | routes: 27 | - when: 28 | - method: GET 29 | path: /items 30 | exit: north_kafka_cache_client 31 | with: 32 | capability: fetch 33 | topic: items-snapshots 34 | merge: 35 | content-type: application/json 36 | - exit: north_kafka_cache_client 37 | when: 38 | - method: GET 39 | path: /items/{id} 40 | with: 41 | capability: fetch 42 | topic: items-snapshots 43 | filters: 44 | - key: ${params.id} 45 | north_kafka_cache_client: 46 | type: kafka 47 | kind: cache_client 48 | exit: south_kafka_cache_server 49 | south_kafka_cache_server: 50 | type: kafka 51 | kind: cache_server 52 | options: 53 | bootstrap: 54 | - items-snapshots 55 | exit: south_kafka_client 56 | south_kafka_client: 57 | type: kafka 58 | kind: client 59 | options: 60 | servers: 61 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 62 | exit: south_tcp_client 63 | south_tcp_client: 64 | type: tcp 65 | kind: client 66 | telemetry: 67 | exporters: 68 | stdout_logs_exporter: 69 | type: stdout 70 | -------------------------------------------------------------------------------- /http.kafka.crud/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT='{"greeting":"Hello, world1"}' 9 | EXPECTED='{"greeting":"Hello, world1"}' 10 | echo \# Testing http.kafka.crud/ 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | curl -k -v -X POST http://localhost:$PORT/items -H 'Idempotency-Key: 1' -H 'Content-Type: application/json' -d "$INPUT" 17 | 18 | # WHEN 19 | OUTPUT=$(curl -k http://localhost:$PORT/items/1) 20 | RESULT=$? 
21 | echo RESULT="$RESULT" 22 | 23 | # THEN 24 | echo OUTPUT="$OUTPUT" 25 | echo EXPECTED="$EXPECTED" 26 | echo 27 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 28 | echo ✅ 29 | else 30 | echo ❌ 31 | EXIT=1 32 | fi 33 | 34 | exit $EXIT 35 | -------------------------------------------------------------------------------- /http.kafka.crud/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-kafka-crud} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | environment: 15 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | kafka: 21 | image: bitnami/kafka:3.5 22 | restart: unless-stopped 23 | hostname: kafka.examples.dev 24 | ports: 25 | - 9092:9092 26 | healthcheck: 27 | test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka.examples.dev:29092 || exit 1 28 | interval: 1s 29 | timeout: 60s 30 | retries: 60 31 | environment: 32 | ALLOW_PLAINTEXT_LISTENER: "yes" 33 | KAFKA_CFG_NODE_ID: "1" 34 | KAFKA_CFG_BROKER_ID: "1" 35 | KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0" 36 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093" 37 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" 38 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" 39 | KAFKA_CFG_LOG_DIRS: "/tmp/logs" 40 | KAFKA_CFG_PROCESS_ROLES: "broker,controller" 41 | KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093" 42 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL" 43 | KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka.examples.dev:29092" 44 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" 45 | 46 | kafka-init: 47 | image: bitnami/kafka:3.5 48 | user: root 49 | depends_on: 50 | kafka: 51 | condition: service_healthy 52 | restart: true 53 | deploy: 54 | restart_policy: 55 | condition: none 56 | max_attempts: 0 57 | entrypoint: ["/bin/sh", "-c"] 58 | environment: 59 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 60 | command: 61 | - | 62 | echo -e "Creating kafka topic"; 63 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --create --if-not-exists --topic items-snapshots --config cleanup.policy=compact 64 | echo -e "Successfully created the following topics:"; 65 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --list; 66 | 67 | kafka-ui: 68 | image: ghcr.io/kafbat/kafka-ui:v1.0.0 69 | restart: unless-stopped 70 | ports: 71 | - 8080:8080 72 | depends_on: 73 | kafka: 74 | condition: service_healthy 75 | restart: true 76 | environment: 77 | KAFKA_CLUSTERS_0_NAME: local 78 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka.examples.dev:29092 79 | 80 | kafkacat: 81 | image: confluentinc/cp-kafkacat:7.1.9 82 | command: "bash" 83 | stdin_open: true 84 | tty: true 85 | 86 | networks: 87 | default: 88 | driver: bridge 89 | -------------------------------------------------------------------------------- /http.kafka.crud/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 
0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | :scheme: http 22 | exit: north_http_kafka_mapping 23 | north_http_kafka_mapping: 24 | type: http-kafka 25 | kind: proxy 26 | routes: 27 | - when: 28 | - method: POST 29 | path: /items 30 | exit: north_kafka_cache_client 31 | with: 32 | capability: produce 33 | topic: items-snapshots 34 | key: ${idempotencyKey} 35 | - when: 36 | - method: PUT 37 | path: /items/{id} 38 | exit: north_kafka_cache_client 39 | with: 40 | capability: produce 41 | topic: items-snapshots 42 | key: ${params.id} 43 | - when: 44 | - method: DELETE 45 | path: /items/{id} 46 | exit: north_kafka_cache_client 47 | with: 48 | capability: produce 49 | topic: items-snapshots 50 | key: ${params.id} 51 | - when: 52 | - method: GET 53 | path: /items 54 | exit: north_kafka_cache_client 55 | with: 56 | capability: fetch 57 | topic: items-snapshots 58 | merge: 59 | content-type: application/json 60 | - when: 61 | - method: GET 62 | path: /items/{id} 63 | exit: north_kafka_cache_client 64 | with: 65 | capability: fetch 66 | topic: items-snapshots 67 | filters: 68 | - key: ${params.id} 69 | north_kafka_cache_client: 70 | type: kafka 71 | kind: cache_client 72 | exit: south_kafka_cache_server 73 | south_kafka_cache_server: 74 | type: kafka 75 | kind: cache_server 76 | options: 77 | bootstrap: 78 | - items-snapshots 79 | exit: south_kafka_client 80 | south_kafka_client: 81 | type: kafka 82 | kind: client 83 | options: 84 | servers: 85 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 86 | exit: south_tcp_client 87 | south_tcp_client: 88 | type: tcp 89 | kind: client 90 | telemetry: 91 | exporters: 92 | stdout_logs_exporter: 93 | type: stdout 94 | -------------------------------------------------------------------------------- /http.kafka.oneway/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT='{"greeting":"Hello, world"}' 9 | EXPECTED="204" 10 | echo \# Testing http.kafka.oneway/ 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | OUTPUT=$(curl -w "%{http_code}" http://localhost:$PORT/events -H "Content-Type: application/json" -d "$INPUT") 18 | RESULT=$? 19 | echo RESULT="$RESULT" 20 | 21 | # THEN 22 | echo OUTPUT="$OUTPUT" 23 | echo EXPECTED="$EXPECTED" 24 | echo 25 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 26 | echo ✅ 27 | else 28 | echo ❌ 29 | EXIT=1 30 | fi 31 | 32 | exit $EXIT 33 | -------------------------------------------------------------------------------- /http.kafka.oneway/README.md: -------------------------------------------------------------------------------- 1 | # http.kafka.oneway 2 | 3 | Listens on http port `7114` or https port `7114` and will produce messages to the `events` topic in Kafka, synchronously. Zilla connects to Kafka using SASL-SCRAM over an SSL encrypted connection. 4 | 5 | ## Requirements 6 | 7 | - jq 8 | - docker compose 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | Send a `POST` request with an event body. 
21 | 22 | ```bash 23 | curl -v http://localhost:7114/events -H "Content-Type: application/json" -d '{"greeting":"Hello, world"}' 24 | ``` 25 | 26 | output: 27 | 28 | ```text 29 | ... 30 | > POST /events HTTP/1.1 31 | > Content-Type: application/json 32 | ... 33 | < HTTP/1.1 204 No Content 34 | ``` 35 | 36 | Verify that the event has been produced to the `events` Kafka topic. 37 | 38 | ```bash 39 | docker compose -p zilla-http-kafka-oneway exec kafkacat \ 40 | kafkacat -C -b kafka:29092 -t events -J -u | jq . 41 | ``` 42 | 43 | output: 44 | 45 | ```json 46 | { 47 | "topic": "events", 48 | "partition": 0, 49 | "offset": 0, 50 | "tstype": "create", 51 | "ts": 1652465273281, 52 | "broker": 1001, 53 | "headers": [ 54 | "content-type", 55 | "application/json" 56 | ], 57 | "payload": "{\"greeting\":\"Hello, world\"}" 58 | } 59 | % Reached end of topic events [0] at offset 1 60 | ``` 61 | 62 | ## Teardown 63 | 64 | To remove any resources created by the Docker Compose stack, use: 65 | 66 | ```bash 67 | docker compose down 68 | ``` 69 | -------------------------------------------------------------------------------- /http.kafka.oneway/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | vaults: 4 | your_clients: 5 | type: filesystem 6 | options: 7 | trust: 8 | store: tls/kafka.truststore.jks 9 | type: jks 10 | password: ${{env.KEYSTORE_PASSWORD}} 11 | bindings: 12 | north_tcp_server: 13 | type: tcp 14 | kind: server 15 | options: 16 | host: 0.0.0.0 17 | port: 18 | - 7114 19 | routes: 20 | - when: 21 | - port: 7114 22 | exit: north_http_server 23 | north_http_server: 24 | type: http 25 | kind: server 26 | routes: 27 | - when: 28 | - headers: 29 | :scheme: http 30 | exit: north_http_kafka_mapping 31 | north_http_kafka_mapping: 32 | type: http-kafka 33 | kind: proxy 34 | routes: 35 | - when: 36 | - path: /events 37 | exit: north_kafka_cache_client 38 | with: 39 | capability: produce 40 | topic: events 41 | north_kafka_cache_client: 42 | type: kafka 43 | kind: cache_client 44 | exit: south_kafka_cache_server 45 | south_kafka_cache_server: 46 | type: kafka 47 | kind: cache_server 48 | exit: south_kafka_client 49 | south_kafka_client: 50 | type: kafka 51 | kind: client 52 | options: 53 | servers: 54 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 55 | sasl: 56 | mechanism: scram-sha-512 57 | username: ${{env.SASL_USERNAME}} 58 | password: ${{env.SASL_PASSWORD}} 59 | exit: south_tls_client 60 | south_tls_client: 61 | type: tls 62 | kind: client 63 | vault: your_clients 64 | options: 65 | trust: 66 | - kafka 67 | sni: 68 | - kafka 69 | alpn: 70 | - h2 71 | exit: south_tcp_client 72 | south_tcp_client: 73 | type: tcp 74 | kind: client 75 | telemetry: 76 | exporters: 77 | stdout_logs_exporter: 78 | type: stdout 79 | -------------------------------------------------------------------------------- /http.kafka.oneway/keystore/kafka.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/http.kafka.oneway/keystore/kafka.keystore.jks -------------------------------------------------------------------------------- /http.kafka.oneway/truststore/ca-key: -------------------------------------------------------------------------------- 1 | -----BEGIN ENCRYPTED PRIVATE KEY----- 2 | MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQIq+9AoKSC4JsCAggA 3 | MB0GCWCGSAFlAwQBKgQQsEKnkyEdHetOeaX5A2hKOgSCBNANwAkRPiVacrOXYSM2 4 | 
deNviB42hUrfc3xh3b/rmg6iJZFQN+YkehC8QzwDw5ySAl/l5gc8zOlIa2SZI7xW 5 | /YQR5WHA26gF9DDhFL1FY7iU/SJ9rmtREBLVhYla56uziW3FAzlOV7OYF6Zw6Ox/ 6 | YWTDOOgvki+sRJ7IXTKqZSOeb0CJlNqxpJTCSxC8XePvH3C8cFsUo3P2hZ5M+q9d 7 | B40ovIJULpDL1dqcw5pKOo/o/eUfjvlUpGGscCSFlazwVvnsnoMRtAxPTQysv0D7 8 | nuJW44iiO9XOX20voxFwC8svxvT0lKxjwIrsUMVx3nMb1rNsbbnd3oUefj0xJU70 9 | 3zdlI2NdRNlmMXGzxXFgr0BH6F3VL4xajA7LlXE41Aoo6JtttmVl8B5YX9WE/1Dx 10 | PG3aLNwdm+Yoc8H6WZpFhpmz0jTbMoUPS55Swe05qg0S+R4MS/lrj5bv3ETSOPUn 11 | BNMvQpRlMphcHowoG3jqPlUR1XFkmSCybGkSaWmm1nDuLbDXgFpw4nQrtVeMxwfM 12 | wvRc8mLcDFpZNPn61/aCgbkPP7O7vtMPnb4oowapCD5A6tSah5g7QyPBVMfUC6j/ 13 | /DP/eUnsug/HZOOOlhayQQsAYkz+pjs4DJTIvhPVzQLqHyA1bgnl2faYk976cy/w 14 | RL4RY5Z5JI7VmZulMadtXuCOd1DjdjRPcs57QbkiLmPLZy4GfVGSyz3yS003LcfP 15 | YsEHPiIdEBXDpA0w2MkNRVtzTBxHApfqWHRCnAh9a/rFW/69upmAQ221PmPNlNJR 16 | cEbTgAEXCIfEvYqz24s/qDnsTvonazY3nFKGdZLOT5c0cfs9S/QJ7tIb893sG67O 17 | 5IXRnpc9kDXhosegtUrj5SA8fDAym9gaSQ9aBzbezJ8Mi6V/Ixc+NN9I4rp0DlIH 18 | mS2QnrUg0NZouX37CjiX4MZr4Pkc1pvf2Mso+hbrleeLgmpE7gBuYJ1PxDPP9/y+ 19 | zlEV/g9wWHSz3vUwzb7JptAofUofGiX1S7jqeQ8+bK6xtxOAqtlOqHEXmODZzRKG 20 | 7Vgepr51/FcK26mnLBXnEmr6D9MCqUhBo3I7lrvlaA6qCvJ0wbjxbGI3/zKkmzFo 21 | nLGdynOBdKTAvtD4r2PcGfRn8Xyn5hjXeG6+acfsTK5bve1IaogmpoYRJ8YcJExA 22 | ndo0jtU+Tfq7Gj8HYOzlMih1i7sSfZTRmQDjJlaYoSmAmmnbiuWnjrStR10L+Wmg 23 | ZBZQqEahsKYh6veY1ROeLtfGWU1reZ6bJb53vpvJyXfINv+paNMjjntuPbrYREXc 24 | bNgMzzH1zybb6NN3DrKNicTi7srXmat4e8Sk+G5G0RD0T4pZYdQfT/LIIhFYWUhT 25 | L2VqfWQQq8oMycoJNnOFMR7ctYOW1m+X1vPPGFrA2yC9FFLzdpzdhp/UtBqVNdBl 26 | zUAxjz4IPDqVeIORFak9c/wlkJlLqKupg1Gc9ALhf71+u+bY66foJ+jL44fGmqWH 27 | PLWGRRC1l9XxVfjfErBVQwQfTP5rIc/dNDXa41Qu0aZnt2ZEVYou1mMoNBPLw7YD 28 | VvpQYeYkIx8ijPjcCTEppwRb/TgAwlKJfw0Pd305c/sS/x8P95skZ2zbD/eNcvbG 29 | 5uJd/tNHPYG3ESJZc+JBri40AQ== 30 | -----END ENCRYPTED PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /http.kafka.oneway/truststore/kafka.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/http.kafka.oneway/truststore/kafka.truststore.jks -------------------------------------------------------------------------------- /http.kafka.proto.json/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT='{ "message": "hello world", "count": 10 }' 9 | EXPECTED="204" 10 | echo \# Testing http.kafka.proto.json/valid 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | OUTPUT=$(curl -w "%{http_code}" http://localhost:$PORT/requests -H "Content-Type: application/json" -d "$INPUT") 18 | RESULT=$? 19 | echo RESULT="$RESULT" 20 | 21 | # THEN 22 | echo OUTPUT="$OUTPUT" 23 | echo EXPECTED="$EXPECTED" 24 | echo 25 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 26 | echo ✅ 27 | else 28 | echo ❌ 29 | EXIT=1 30 | fi 31 | 32 | # GIVEN 33 | PORT="7114" 34 | INPUT='{ "message": "hello world", "count": 10, "invalid": "field" }' 35 | EXPECTED="400" 36 | echo \# Testing http.kafka.proto.json/invalid 37 | echo PORT="$PORT" 38 | echo INPUT="$INPUT" 39 | echo EXPECTED="$EXPECTED" 40 | echo 41 | 42 | # WHEN 43 | OUTPUT=$(curl -w "%{http_code}" http://localhost:$PORT/requests -H "Content-Type: application/json" -d "$INPUT") 44 | RESULT=$? 
45 | echo RESULT="$RESULT" 46 | 47 | # THEN 48 | echo OUTPUT="$OUTPUT" 49 | echo EXPECTED="$EXPECTED" 50 | echo 51 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 52 | echo ✅ 53 | else 54 | echo ❌ 55 | EXIT=1 56 | fi 57 | 58 | exit $EXIT 59 | -------------------------------------------------------------------------------- /http.kafka.proto.json/README.md: -------------------------------------------------------------------------------- 1 | # http.kafka.proto.json 2 | 3 | This example allows a protobuf object to be sent to a REST endpoint as JSON, which is validated and converted to protobuf when it is produced onto Kafka. 4 | 5 | ## Setup 6 | 7 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 8 | 9 | ```bash 10 | docker compose up -d 11 | ``` 12 | 13 | ### Verify behavior for a valid event 14 | 15 | ```bash 16 | curl 'http://localhost:7114/requests' \ 17 | --header 'Content-Type: application/json' \ 18 | --data '{ 19 | "message": "hello message", 20 | "count": 10 21 | }' -v 22 | ``` 23 | 24 | Output: 25 | 26 | ```bash 27 | * Host localhost:7114 was resolved. 28 | ... 29 | > Content-Type: application/json 30 | > Content-Length: 51 31 | > 32 | * upload completely sent off: 51 bytes 33 | < HTTP/1.1 204 No Content 34 | ``` 35 | 36 | ### Verify behavior for an invalid event 37 | 38 | ```bash 39 | curl 'http://localhost:7114/requests' \ 40 | --header 'Content-Type: application/json' \ 41 | --data '{ 42 | "message": "hello world", 43 | "count": 10, 44 | "invalid": "field" 45 | }' -v 46 | ``` 47 | 48 | Output: 49 | 50 | ```bash 51 | * Host localhost:7114 was resolved. 52 | ... 53 | > Content-Type: application/json 54 | > Content-Length: 73 55 | > 56 | * upload completely sent off: 73 bytes 57 | < HTTP/1.1 400 Bad Request 58 | ``` 59 | 60 | ## Teardown 61 | 62 | To remove any resources created by the Docker Compose stack, use: 63 | 64 | ```bash 65 | docker compose down 66 | ``` 67 | -------------------------------------------------------------------------------- /http.kafka.proto.json/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-kafka-proto-json} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | environment: 15 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | kafka: 21 | image: bitnami/kafka:3.5 22 | restart: unless-stopped 23 | hostname: kafka.examples.dev 24 | ports: 25 | - 9092:9092 26 | healthcheck: 27 | test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka.examples.dev:29092 || exit 1 28 | interval: 1s 29 | timeout: 60s 30 | retries: 60 31 | environment: 32 | ALLOW_PLAINTEXT_LISTENER: "yes" 33 | KAFKA_CFG_NODE_ID: "1" 34 | KAFKA_CFG_BROKER_ID: "1" 35 | KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0" 36 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093" 37 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" 38 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" 39 | KAFKA_CFG_LOG_DIRS: "/tmp/logs" 40 | KAFKA_CFG_PROCESS_ROLES: "broker,controller" 41 | KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093" 42 |
KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL" 43 | KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka.examples.dev:29092" 44 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" 45 | 46 | kafka-init: 47 | image: bitnami/kafka:3.5 48 | user: root 49 | depends_on: 50 | kafka: 51 | condition: service_healthy 52 | restart: true 53 | deploy: 54 | restart_policy: 55 | condition: none 56 | max_attempts: 0 57 | entrypoint: ["/bin/sh", "-c"] 58 | environment: 59 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 60 | command: 61 | - | 62 | echo -e "Creating kafka topic"; 63 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --create --if-not-exists --topic my-requests 64 | echo -e "Successfully created the following topics:"; 65 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --list; 66 | 67 | kafka-ui: 68 | image: ghcr.io/kafbat/kafka-ui:v1.0.0 69 | restart: unless-stopped 70 | ports: 71 | - 8080:8080 72 | depends_on: 73 | kafka: 74 | condition: service_healthy 75 | restart: true 76 | environment: 77 | KAFKA_CLUSTERS_0_NAME: local 78 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka.examples.dev:29092 79 | 80 | kafkacat: 81 | image: confluentinc/cp-kafkacat:7.1.9 82 | command: "bash" 83 | stdin_open: true 84 | tty: true 85 | 86 | networks: 87 | default: 88 | driver: bridge 89 | -------------------------------------------------------------------------------- /http.kafka.proto.json/etc/protos/request.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message Request { 4 | string message = 1; 5 | int32 count = 2; 6 | } 7 | -------------------------------------------------------------------------------- /http.kafka.proto.json/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | my_proto_subject: 9 | path: protos/request.proto 10 | 11 | bindings: 12 | north_tcp_server: 13 | type: tcp 14 | kind: server 15 | options: 16 | host: 0.0.0.0 17 | port: 18 | - 7114 19 | routes: 20 | - when: 21 | - port: 7114 22 | exit: north_http_server 23 | north_http_server: 24 | type: http 25 | kind: server 26 | exit: north_http_kafka_mapping 27 | north_http_kafka_mapping: 28 | type: http-kafka 29 | kind: proxy 30 | routes: 31 | - when: 32 | - method: POST 33 | path: /requests 34 | exit: north_kafka_cache_client 35 | with: 36 | capability: produce 37 | topic: my-requests 38 | key: ${idempotencyKey} 39 | north_kafka_cache_client: 40 | type: kafka 41 | kind: cache_client 42 | options: 43 | topics: 44 | - name: my-requests 45 | value: 46 | model: protobuf 47 | view: json 48 | catalog: 49 | host_filesystem: 50 | - subject: my_proto_subject 51 | record: Request 52 | exit: south_kafka_cache_server 53 | south_kafka_cache_server: 54 | type: kafka 55 | kind: cache_server 56 | exit: south_kafka_client 57 | south_kafka_client: 58 | type: kafka 59 | kind: client 60 | options: 61 | servers: 62 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 63 | exit: south_tcp_client 64 | south_tcp_client: 65 | type: tcp 66 | kind: client 67 | telemetry: 68 | exporters: 69 | stdout_logs_exporter: 70 | type: stdout 71 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 
5 | 6 | # GIVEN 7 | PORT="7114" 8 | PROTO_FILE="./request.proto" 9 | INPUT="message:'Hello, world',count:10" 10 | ENCODED_FILE="encoded_input.bin" 11 | EXPECTED="204" 12 | 13 | echo \# Testing http.kafka.proto.oneway/ 14 | echo PORT="$PORT" 15 | echo INPUT="$INPUT" 16 | echo EXPECTED="$EXPECTED" 17 | echo 18 | 19 | # Generate encoded Protobuf input once if not already generated 20 | if [ ! -f "$ENCODED_FILE" ]; then 21 | echo "$INPUT" | protoc --encode=Request "$PROTO_FILE" > "$ENCODED_FILE" 22 | fi 23 | 24 | # WHEN 25 | OUTPUT=$(curl -w "%{http_code}" -s --request POST http://localhost:$PORT/requests \ 26 | --header "Content-Type: application/protobuf" \ 27 | --data-binary @"$ENCODED_FILE") 28 | RESULT=$? 29 | echo RESULT="$RESULT" 30 | 31 | # THEN 32 | echo OUTPUT="$OUTPUT" 33 | echo EXPECTED="$EXPECTED" 34 | echo 35 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 36 | echo ✅ 37 | else 38 | echo ❌ 39 | EXIT=1 40 | fi 41 | 42 | exit $EXIT 43 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/README.md: -------------------------------------------------------------------------------- 1 | # http.kafka.proto.oneway 2 | 3 | This example allows a protobuf object to be sent to a REST endpoint as a protobuf-encoded payload that gets validated against the proto schema when it is produced onto Kafka. 4 | 5 | ## Setup 6 | 7 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 8 | 9 | ```bash 10 | docker compose up -d 11 | ``` 12 | 13 | ## Watch Kafka 14 | 15 | Open the [Kafka UI](http://localhost:8080/ui/clusters/local/all-topics/my-requests/messages?limit=100&mode=TAILING) or run the kafkacat command: 16 | 17 | ```bash 18 | docker compose -p zilla-http-kafka-proto-oneway exec kafkacat \ 19 | kafkacat -b kafka.examples.dev:29092 -C -f 'Key:Message | %k:%s\n Headers | %h \n\n' -t my-requests 20 | ``` 21 | 22 | ## Publish message with correct proto file 23 | 24 | ```bash 25 | echo "message:'hello world',count:10" \ 26 | | protoc --encode Request ./request.proto \ 27 | | curl -s --request POST http://localhost:7114/requests \ 28 | --header "Content-Type: application/protobuf" \ 29 | --data-binary @- 30 | ``` 31 | 32 | ## Block message with incorrect proto file 33 | 34 | ```bash 35 | echo "message:'hello bad type',count:'ten'" \ 36 | | protoc --encode Request ./request_bad_type.proto \ 37 | | curl -s --request POST http://localhost:7114/requests \ 38 | --header "Content-Type: application/protobuf" \ 39 | --data-binary @- 40 | ``` 41 | 42 | ```bash 43 | echo "message:'hello extra field',count:10,extra:'field'" \ 44 | | protoc --encode Request ./request_extra_field.proto \ 45 | | curl -s --request POST http://localhost:7114/requests \ 46 | --header "Content-Type: application/protobuf" \ 47 | --data-binary @- 48 | ``` 49 | 50 | ```bash 51 | echo "message:'hello wrong order',count:10" \ 52 | | protoc --encode Request ./request_wrong_order.proto \ 53 | | curl -s --request POST http://localhost:7114/requests \ 54 | --header "Content-Type: application/protobuf" \ 55 | --data-binary @- 56 | ``` 57 | 58 | ## Teardown 59 | 60 | To remove any resources created by the Docker Compose stack, use: 61 | 62 | ```bash 63 | docker compose down 64 | ``` 65 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-kafka-proto-oneway} 2 | services: 3 |
zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | environment: 15 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | kafka: 21 | image: bitnami/kafka:3.5 22 | restart: unless-stopped 23 | hostname: kafka.examples.dev 24 | ports: 25 | - 9092:9092 26 | healthcheck: 27 | test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka.examples.dev:29092 || exit 1 28 | interval: 1s 29 | timeout: 60s 30 | retries: 60 31 | environment: 32 | ALLOW_PLAINTEXT_LISTENER: "yes" 33 | KAFKA_CFG_NODE_ID: "1" 34 | KAFKA_CFG_BROKER_ID: "1" 35 | KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0" 36 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093" 37 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" 38 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" 39 | KAFKA_CFG_LOG_DIRS: "/tmp/logs" 40 | KAFKA_CFG_PROCESS_ROLES: "broker,controller" 41 | KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093" 42 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL" 43 | KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka.examples.dev:29092" 44 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" 45 | 46 | kafka-init: 47 | image: bitnami/kafka:3.5 48 | user: root 49 | depends_on: 50 | kafka: 51 | condition: service_healthy 52 | restart: true 53 | deploy: 54 | restart_policy: 55 | condition: none 56 | max_attempts: 0 57 | entrypoint: ["/bin/sh", "-c"] 58 | environment: 59 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 60 | command: 61 | - | 62 | echo -e "Creating kafka topic"; 63 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --create --if-not-exists --topic my-requests 64 | echo -e "Successfully created the following topics:"; 65 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --list; 66 | 67 | kafka-ui: 68 | image: ghcr.io/kafbat/kafka-ui:v1.0.0 69 | restart: unless-stopped 70 | ports: 71 | - 8080:8080 72 | depends_on: 73 | kafka: 74 | condition: service_healthy 75 | restart: true 76 | environment: 77 | KAFKA_CLUSTERS_0_NAME: local 78 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka.examples.dev:29092 79 | 80 | kafkacat: 81 | image: confluentinc/cp-kafkacat:7.1.9 82 | command: "bash" 83 | stdin_open: true 84 | tty: true 85 | 86 | networks: 87 | default: 88 | driver: bridge 89 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/encoded_input.bin: -------------------------------------------------------------------------------- 1 | 2 | Hello, world 3 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/etc/protos/request.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message Request { 4 | string message = 1; 5 | int32 count = 2; 6 | } 7 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/etc/protos/request_bad_type.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message Request { 4 | string message = 1; 5 | string count 
= 2; 6 | } 7 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/etc/protos/request_extra_field.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message Request { 4 | string message = 1; 5 | int32 count = 2; 6 | string extra = 3; 7 | } 8 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/etc/protos/request_wrong_order.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | message Request { 4 | int32 count = 1; 5 | string message = 2; 6 | } 7 | -------------------------------------------------------------------------------- /http.kafka.proto.oneway/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | my_proto_subject: 9 | path: protos/request.proto 10 | 11 | bindings: 12 | north_tcp_server: 13 | type: tcp 14 | kind: server 15 | options: 16 | host: 0.0.0.0 17 | port: 18 | - 7114 19 | routes: 20 | - when: 21 | - port: 7114 22 | exit: north_http_server 23 | north_http_server: 24 | type: http 25 | kind: server 26 | exit: north_http_kafka_mapping 27 | north_http_kafka_mapping: 28 | type: http-kafka 29 | kind: proxy 30 | routes: 31 | - when: 32 | - method: POST 33 | path: /requests 34 | exit: north_kafka_cache_client 35 | with: 36 | capability: produce 37 | topic: my-requests 38 | key: ${idempotencyKey} 39 | north_kafka_cache_client: 40 | type: kafka 41 | kind: cache_client 42 | options: 43 | topics: 44 | - name: my-requests 45 | value: 46 | model: protobuf 47 | catalog: 48 | host_filesystem: 49 | - subject: my_proto_subject 50 | record: Request 51 | exit: south_kafka_cache_server 52 | south_kafka_cache_server: 53 | type: kafka 54 | kind: cache_server 55 | exit: south_kafka_client 56 | south_kafka_client: 57 | type: kafka 58 | kind: client 59 | options: 60 | servers: 61 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 62 | exit: south_tcp_client 63 | south_tcp_client: 64 | type: tcp 65 | kind: client 66 | telemetry: 67 | exporters: 68 | stdout_logs_exporter: 69 | type: stdout 70 | -------------------------------------------------------------------------------- /http.kafka.sync/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | KAFKA_BOOTSTRAP_SERVER="kafka.examples.dev:29092" 9 | ITEM_ID="$(date +%s)" 10 | GREETING="Hello, World! $ITEM_ID" 11 | GREETING_DATE="Hello, World! 
$(date)" 12 | EXPECTED="{\"greeting\":\"$GREETING_DATE\"}" 13 | 14 | echo \# Testing http.kafka.sync/ 15 | echo PORT="$PORT" 16 | echo KAFKA_BOOTSTRAP_SERVER="$KAFKA_BOOTSTRAP_SERVER" 17 | echo ITEM_ID="$ITEM_ID" 18 | echo GREETING="$GREETING" 19 | echo GREETING_DATE="$GREETING_DATE" 20 | echo 21 | 22 | # WHEN 23 | # send request to zilla 24 | timeout 60s curl \ 25 | -X "PUT" http://localhost:$PORT/items/$ITEM_ID \ 26 | -H "Idempotency-Key: $ITEM_ID" \ 27 | -H "Content-Type: application/json" \ 28 | -d "{\"greeting\":\"$GREETING\"}" | tee .testoutput & 29 | 30 | # fetch correlation id from kafka with kafkacat; try 5 times 31 | for i in $(seq 0 5); do 32 | sleep $i 33 | CORRELATION_ID=$(docker compose -p zilla-http-kafka-sync exec kafkacat kafkacat -C -c 1 -o-1 -b $KAFKA_BOOTSTRAP_SERVER -t items-requests -J -u | jq -r '.headers | index("zilla:correlation-id") as $index | .[$index + 1]') 34 | if [ -n "$CORRELATION_ID" ]; then 35 | break 36 | fi 37 | done 38 | echo CORRELATION_ID="$CORRELATION_ID" 39 | if [ -z "$CORRELATION_ID" ]; then 40 | echo ❌ 41 | EXIT=1 42 | fi 43 | 44 | # push response to kafka with kafkacat 45 | echo "{\"greeting\":\"$GREETING_DATE\"}" | 46 | docker compose -p zilla-http-kafka-sync exec -T kafkacat \ 47 | kafkacat -P \ 48 | -b $KAFKA_BOOTSTRAP_SERVER \ 49 | -t items-responses \ 50 | -k "$ITEM_ID" \ 51 | -H ":status=200" \ 52 | -H "zilla:correlation-id=$CORRELATION_ID" 53 | 54 | # fetch the output of zilla request; try 5 times 55 | for i in $(seq 0 5); do 56 | sleep $i 57 | OUTPUT=$(cat .testoutput) 58 | if [ -n "$OUTPUT" ]; then 59 | break 60 | fi 61 | done 62 | rm .testoutput 63 | 64 | # THEN 65 | echo OUTPUT="$OUTPUT" 66 | echo EXPECTED="$EXPECTED" 67 | echo 68 | if [ "$OUTPUT" = "$EXPECTED" ]; then 69 | echo ✅ 70 | else 71 | echo ❌ 72 | EXIT=1 73 | fi 74 | 75 | exit $EXIT 76 | -------------------------------------------------------------------------------- /http.kafka.sync/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | :scheme: http 22 | exit: north_http_kafka_mapping 23 | north_http_kafka_mapping: 24 | type: http-kafka 25 | kind: proxy 26 | routes: 27 | - when: 28 | - path: /items/{id} 29 | exit: north_kafka_cache_client 30 | with: 31 | capability: produce 32 | topic: items-requests 33 | key: ${params.id} 34 | reply-to: items-responses 35 | north_kafka_cache_client: 36 | type: kafka 37 | kind: cache_client 38 | exit: south_kafka_cache_server 39 | south_kafka_cache_server: 40 | type: kafka 41 | kind: cache_server 42 | options: 43 | bootstrap: 44 | - items-responses 45 | exit: south_kafka_client 46 | south_kafka_client: 47 | type: kafka 48 | kind: client 49 | options: 50 | servers: 51 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 52 | exit: south_tcp_client 53 | south_tcp_client: 54 | type: tcp 55 | kind: client 56 | telemetry: 57 | exporters: 58 | stdout_logs_exporter: 59 | type: stdout 60 | -------------------------------------------------------------------------------- /http.proxy.jwt/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | PORT="7114" 6 | MESSAGE="Hello, world" 7 | 8 | echo "# Testing http.proxy.jwt" 9 | 
echo "PORT=$PORT" 10 | 11 | # Generate JWT token without echo:stream scope 12 | JWT_TOKEN_NO_SCOPE=$(docker compose run --rm \ 13 | jwt-cli encode \ 14 | --alg "RS256" \ 15 | --kid "example" \ 16 | --iss "https://auth.example.com" \ 17 | --aud "https://api.example.com" \ 18 | --exp=+1d \ 19 | --no-iat \ 20 | --secret @/private.pem | tr -d '\r\n') 21 | 22 | UNAUTHORIZED_RESPONSE=$(curl -w "%{http_code}" http://localhost:$PORT/ \ 23 | -H "Authorization: Bearer $JWT_TOKEN_NO_SCOPE" \ 24 | -H "Content-Type: text/plain" \ 25 | -d "$MESSAGE") 26 | 27 | if [ "$UNAUTHORIZED_RESPONSE" = "404" ]; then 28 | echo ✅ 29 | else 30 | echo ❌ 31 | EXIT=1 32 | fi 33 | 34 | # Generate JWT token with echo:stream scope 35 | JWT_TOKEN_WITH_SCOPE=$(docker compose run --rm \ 36 | jwt-cli encode \ 37 | --alg "RS256" \ 38 | --kid "example" \ 39 | --iss "https://auth.example.com" \ 40 | --aud "https://api.example.com" \ 41 | --exp=+1d \ 42 | --no-iat \ 43 | --payload "scope=echo:stream" \ 44 | --secret @/private.pem | tr -d '\r\n') 45 | 46 | AUTHORIZED_RESPONSE=$(curl "http://localhost:$PORT/" \ 47 | -H "Authorization: Bearer $JWT_TOKEN_WITH_SCOPE" \ 48 | -H "Content-Type: text/plain" \ 49 | -d "$MESSAGE") 50 | 51 | if [ "$AUTHORIZED_RESPONSE" = "$MESSAGE" ]; then 52 | echo ✅ 53 | else 54 | echo ❌ 55 | EXIT=1 56 | fi 57 | 58 | exit $EXIT 59 | -------------------------------------------------------------------------------- /http.proxy.jwt/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-proxy-jwt} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | jwt-cli: 19 | image: bitnami/jwt-cli 20 | stdin_open: true 21 | tty: true 22 | profiles: 23 | - on-demand 24 | volumes: 25 | - ./private.pem:/private.pem 26 | 27 | networks: 28 | default: 29 | driver: bridge 30 | -------------------------------------------------------------------------------- /http.proxy.jwt/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | guards: 4 | authn_jwt: 5 | type: jwt 6 | options: 7 | issuer: https://auth.example.com 8 | audience: https://api.example.com 9 | keys: 10 | - kty: RSA 11 | n: qqEu50hX+43Bx4W1UYWnAVKwFm+vDbP0kuIOSLVNa+HKQdHTf+3Sei5UCnkskn796izA29D0DdCy3ET9oaKRHIJyKbqFl0rv6f516QzOoXKC6N01sXBHBE/ovs0wwDvlaW+gFGPgkzdcfUlyrWLDnLV7LcuQymhTND2uH0oR3wJnNENN/OFgM1KGPPDOe19YsIKdLqARgxrhZVsh06OurEviZTXOBFI5r+yac7haDwOQhLHXNv+Y9MNvxs5QLWPFIM3bNUWfYrJnLrs4hGJS+y/KDM9Si+HL30QAFXy4YNO33J8DHjZ7ddG5n8/FqplOKvRtUgjcKWlxoGY4VdVaDQ== 12 | e: AQAB 13 | alg: RS256 14 | kid: example 15 | bindings: 16 | north_tcp_server: 17 | type: tcp 18 | kind: server 19 | options: 20 | host: 0.0.0.0 21 | port: 22 | - 7114 23 | routes: 24 | - when: 25 | - port: 7114 26 | exit: north_http_server 27 | north_http_server: 28 | type: http 29 | kind: server 30 | options: 31 | authorization: 32 | authn_jwt: 33 | credentials: 34 | headers: 35 | authorization: Bearer {credentials} 36 | routes: 37 | - guarded: 38 | authn_jwt: 39 | - echo:stream 40 | when: 41 | - headers: 42 | :scheme: http 43 | 44 | exit: north_echo_server 45 | north_echo_server: 46 | type: echo 47 | kind: server 48 | telemetry: 49 | 
exporters: 50 | stdout_logs_exporter: 51 | type: stdout 52 | -------------------------------------------------------------------------------- /http.proxy.jwt/private.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAqqEu50hX+43Bx4W1UYWnAVKwFm+vDbP0kuIOSLVNa+HKQdHT 3 | f+3Sei5UCnkskn796izA29D0DdCy3ET9oaKRHIJyKbqFl0rv6f516QzOoXKC6N01 4 | sXBHBE/ovs0wwDvlaW+gFGPgkzdcfUlyrWLDnLV7LcuQymhTND2uH0oR3wJnNENN 5 | /OFgM1KGPPDOe19YsIKdLqARgxrhZVsh06OurEviZTXOBFI5r+yac7haDwOQhLHX 6 | Nv+Y9MNvxs5QLWPFIM3bNUWfYrJnLrs4hGJS+y/KDM9Si+HL30QAFXy4YNO33J8D 7 | HjZ7ddG5n8/FqplOKvRtUgjcKWlxoGY4VdVaDQIDAQABAoIBAQCU5nnO3UPiQs+S 8 | 9SzIynB16BnASpAhziOlNl4utwCsH2roS1pdLkXZ16oBRcNOyEF+5LUcXllL4Q7k 9 | x9PqBLrLfU5w+dNwtrVspmFxEXsUSqDQ45HimU9wBOff8aIUb3CAtSemA47MCajN 10 | sJXBlwmLLJgk25sr9yR810KnDXVQ/RQXWGnRiyVvgHjYyrhQEam22z95+60yJPbQ 11 | hFphWPGDj4O1kOkuQil/ciD5Br1IcrftNp32UZE1c5TyBL9kyLH0iYGELt+UR4ht 12 | kbkBYF4ai0tiDRSpi1M0yd+5EDbi7kK5qI/XKZiDFptKXFkb49hln6mYpzuumzxh 13 | W7MqNswlAoGBANLAXpLmcLm5IqvjGRT3V34O1C1ztQzR9h8Vor0hpfVZEcUTjQ8i 14 | HwzkIfIBQqfOpxCrutbPSC9gOQIrjPS4O04B6qI/n1xY8n2bm7xEN0w68cjeS+Aa 15 | duB8NmXGW3iTCWkZ8LntKKiijY/qM4DNTJwOHNnx/gEoccSJBX7eQdVrAoGBAM9D 16 | lEv2A6+AwQP19kiN+PVGtzQx/VD8URCe8b5FOxEY42Cfic13ZUK2o35cZKrWgw5T 17 | gO+PtrLGg6iHYv6Igib2urHJrX8bv4GhuUk3e4ZUDfBvQEctb+3ziDIH+dz1idxG 18 | 3NBm3YIyROrvV4sZknziPAxSSs0nsRO99LKC7HRnAoGABfL8OHVc1UFOoz+D472f 19 | sKVGhAnyIGyE4BfCQkiC4Mwk3kVRBA5YhgqikqxKb2Y7/RJ78bhkN0ImGdOU5QuW 20 | UPto3i+hlf/EyJrt6ICcdwBq9tqflbHpjSi0eGcDCgJMj7T1wKnbLfd4u4lG7unc 21 | scMMOXLFUs8LdxrPFue7QAUCgYAmVIEgaybTViRb7yjU0iywb5uB85y9VWxIfpyG 22 | a5c42jlyrQ53CRWY+N3TiJK1ZWZnR9oYc6N9/GhsylzsZaJsQkTUjE+hqigIeTgi 23 | 6jfV58WMKYbhou2IO/l8By2WR3JvYkuD3wIoCdPk/s5Z0yjcH5qrTKy4tBZzaDXQ 24 | rZW9EQKBgEpfuGRtsUE+FUEqLcqfI3w5aYMKb0pEsG4zbvxB/4QeHdAawNEqeSrS 25 | TC0boMRlZ3Mf6GpZ9No/qVvfsW45PghmT77b4nsR2Sh0mdPMbBzdoqOOTwgNAoKP 26 | RHsZtVdb9Gk7Jf2UHTEZY5NLKyXGH/qj4/7ajxixtHNvlD3oI14F 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /http.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7143" 8 | EXPECTED=' 9 | 10 | 11 | This is the title of the webpage! 12 | 13 | 14 | 15 |

This is an example paragraph. Anything in the body tag will appear on the page, just like this p tag and its contents.

16 | 17 | ' 18 | echo \# Testing http.proxy/ 19 | echo PORT="$PORT" 20 | echo EXPECTED="$EXPECTED" 21 | echo 22 | 23 | # WHEN 24 | OUTPUT=$(docker compose -p zilla-http-proxy exec nghttp nghttp --no-verify https://zilla.examples.dev:$PORT/demo.html) 25 | RESULT=$? 26 | echo RESULT="$RESULT" 27 | 28 | # THEN 29 | echo OUTPUT="$OUTPUT" 30 | echo EXPECTED="$EXPECTED" 31 | echo 32 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 33 | echo ✅ 34 | else 35 | echo ❌ 36 | EXIT=1 37 | fi 38 | 39 | exit $EXIT 40 | -------------------------------------------------------------------------------- /http.proxy/README.md: -------------------------------------------------------------------------------- 1 | # http.proxy 2 | 3 | Listens on https port `7143` and responds with whatever is hosted in `nginx` at that path. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - [nghttp2](https://nghttp2.org/) 9 | 10 | ### Install nghttp2 client 11 | 12 | nghttp2 is an implementation of an HTTP/2 client. 13 | 14 | ```bash 15 | brew install nghttp2 16 | ``` 17 | 18 | ## Setup 19 | 20 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 21 | 22 | ```bash 23 | docker compose up -d 24 | ``` 25 | 26 | ### Verify behavior 27 | 28 | ```bash 29 | nghttp -ansy https://localhost:7143/demo.html 30 | ``` 31 | 32 | output: 33 | 34 | ```text 35 | ***** Statistics ***** 36 | 37 | Request timing: 38 | responseEnd: the time when last byte of response was received 39 | relative to connectEnd 40 | requestStart: the time just before first byte of request was sent 41 | relative to connectEnd. If '*' is shown, this was 42 | pushed by server. 43 | process: responseEnd - requestStart 44 | code: HTTP status code 45 | size: number of bytes received as response body without 46 | inflation. 47 | URI: request URI 48 | 49 | see http://www.w3.org/TR/resource-timing/#processing-model 50 | 51 | sorted by 'complete' 52 | 53 | id responseEnd requestStart process code size request path 54 | 13 +921.19ms +146us 921.05ms 200 320 /demo.html 55 | 2 +923.02ms * +912.81ms 10.21ms 200 89 /style.css 56 | ``` 57 | 58 | You also get the `/style.css` response as a server push promise, which nginx is configured to send.
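If you want to confirm the pushed stylesheet is also reachable as a normal request, you can fetch it directly with the same client; a quick check, assuming the stylesheet is served at `/style.css` on the same `7143` listener:

```bash
# Fetch the stylesheet directly, reusing the same flags as the demo.html request above;
# the statistics table should show a single 200 response for /style.css
nghttp -ansy https://localhost:7143/style.css
```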
59 | 60 | ## Teardown 61 | 62 | To remove any resources created by the Docker Compose stack, use: 63 | 64 | ```bash 65 | docker compose down 66 | ``` 67 | -------------------------------------------------------------------------------- /http.proxy/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-http-proxy} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7143:7143 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7143"] 14 | environment: 15 | KEYSTORE_PASSWORD: generated 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | nginx: 21 | image: nginx:stable 22 | restart: unless-stopped 23 | ports: 24 | - 443:443 25 | volumes: 26 | - ./nginx.conf:/etc/nginx/nginx.conf 27 | - ./ssl:/etc/nginx/ssl 28 | - ./www:/usr/share/nginx/html 29 | 30 | nghttp: 31 | image: dajobe/nghttpx 32 | command: sleep infinity 33 | stdin_open: true 34 | tty: true 35 | 36 | networks: 37 | default: 38 | driver: bridge 39 | -------------------------------------------------------------------------------- /http.proxy/etc/tls/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/http.proxy/etc/tls/localhost.p12 -------------------------------------------------------------------------------- /http.proxy/etc/tls/truststore.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/http.proxy/etc/tls/truststore.p12 -------------------------------------------------------------------------------- /http.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | vaults: 4 | your_clients: 5 | type: filesystem 6 | options: 7 | trust: 8 | store: tls/truststore.p12 9 | type: pkcs12 10 | password: ${{env.KEYSTORE_PASSWORD}} 11 | my_servers: 12 | type: filesystem 13 | options: 14 | keys: 15 | store: tls/localhost.p12 16 | type: pkcs12 17 | password: ${{env.KEYSTORE_PASSWORD}} 18 | bindings: 19 | north_tcp_server: 20 | type: tcp 21 | kind: server 22 | options: 23 | host: 0.0.0.0 24 | port: 7143 25 | exit: north_tls_server 26 | north_tls_server: 27 | type: tls 28 | kind: server 29 | vault: my_servers 30 | options: 31 | keys: 32 | - localhost 33 | sni: 34 | - localhost 35 | alpn: 36 | - h2 37 | exit: north_http_server 38 | north_http_server: 39 | type: http 40 | kind: server 41 | options: 42 | access-control: 43 | policy: cross-origin 44 | routes: 45 | - when: 46 | - headers: 47 | :scheme: https 48 | :path: /* 49 | exit: south_http_client 50 | south_http_client: 51 | type: http 52 | kind: client 53 | exit: south_tls_client 54 | south_tls_client: 55 | type: tls 56 | kind: client 57 | vault: your_clients 58 | options: 59 | trustcacerts: true 60 | trust: 61 | - nginx 62 | sni: 63 | - nginx 64 | alpn: 65 | - h2 66 | exit: south_tcp_client 67 | south_tcp_client: 68 | type: tcp 69 | kind: client 70 | options: 71 | host: nginx 72 | port: 443 73 | telemetry: 74 | exporters: 75 | stdout_logs_exporter: 76 | type: stdout 77 | -------------------------------------------------------------------------------- 
/http.proxy/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 1; 3 | 4 | error_log /var/log/nginx/error.log warn; 5 | pid /var/run/nginx.pid; 6 | 7 | events { 8 | worker_connections 1024; 9 | } 10 | 11 | http { 12 | include /etc/nginx/mime.types; 13 | default_type application/octet-stream; 14 | 15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 16 | '$status $body_bytes_sent "$http_referer" ' 17 | '"$http_user_agent" "$http_x_forwarded_for"'; 18 | 19 | access_log /var/log/nginx/access.log main; 20 | 21 | sendfile on; 22 | 23 | keepalive_timeout 65; 24 | 25 | server { 26 | listen 443 ssl http2; 27 | 28 | ssl_certificate /etc/nginx/ssl/cert.pem; 29 | ssl_certificate_key /etc/nginx/ssl/key.pem; 30 | 31 | root /usr/share/nginx/html; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /http.proxy/ssl/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIGPzCCBCegAwIBAgIJAP0KJjotH2akMA0GCSqGSIb3DQEBCwUAMG8xCzAJBgNV 3 | BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8x 4 | ETAPBgNVBAoMCEFrbGl2aXR5MRQwEgYDVQQLDAtEZXZlbG9wbWVudDEOMAwGA1UE 5 | AwwFbmdpbngwHhcNMjMwMTIwMTk0MDU4WhcNMzIxMDE5MTk0MDU4WjBvMQswCQYD 6 | VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRv 7 | MREwDwYDVQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxDjAMBgNV 8 | BAMMBW5naW54MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAttPoRzh5 9 | gETTX/sB3xV0U8HrHUoLM9l1hh+cU/SIlV6OR7lX5p/KxaGB4Jvf6eipBqi7kPJU 10 | hHgaTRLo9SqsrQxK4kNQmBoAfPpQirL2qiOK37QH9uB6ejFEpc+SaNUtPd3F+xL0 11 | 0WZO/Kx7S/Tv2y2ZUd6bi7K4CLTn0UHT5aD/Xn/rNqxWWKR+H1VTOAhpAQNDvUUG 12 | sY8DDOuJRfLdlp1CybqGFuZyyv0+PR4QMDUW/yOQfWHVSo7E+DL5m+8c2Rbz0uyR 13 | PxSeYB6vxoEU8WtVtWOYltjm4ZnwDo3Kt6yJTrWYPAx0VzEzgU8jcF8gXP543eBF 14 | fgySVVL1Kc6jzoGPjKBbzBgWGyfxsLvjJJoV/7oZk/yzCp7tB/lej1bkUfKxBFAU 15 | a0nSafeN8R8W3rZB2OLFJT0je5rGKxKHYfrF9dGFqOxeQtclbTFOZlMBerOOZYYo 16 | bnbnBNxaMRS3DRRxriqKWwusR5/Bofy+i64n0c4sAyvSOO+cCQqJu9h58A8K7PD5 17 | PxXSImPoWuKuUENJSvaDL3NVwX1opWpjA4cNpXrpewy/a6tDxsfoYtsM0ZS4+CaV 18 | i1hGqsrsF5w8GGkQRwsxkpzegWbJeJEAMX85t0JmJtnLhnQDCYdnBxbdtVlkURgF 19 | dS8XT6y3ZB880+TbQrwQO+xKk7gcaJgiOfMCAwEAAaOB3TCB2jAOBgNVHQ8BAf8E 20 | BAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwgYsGA1UdIwSBgzCBgKFzpHEwbzEL 21 | MAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCVBhbG8g 22 | QWx0bzERMA8GA1UECgwIQWtsaXZpdHkxFDASBgNVBAsMC0RldmVsb3BtZW50MQ4w 23 | DAYDVQQDDAVuZ2lueIIJAP0KJjotH2akMCUGA1UdEQQeMByCBW5naW54ghMqLnN2 24 | Yy5jbHVzdGVyLmxvY2FsMA0GCSqGSIb3DQEBCwUAA4ICAQCgcBZrcA4pHhFtu3SR 25 | 9CScQNKIjikKDnoFDaeE1pHB1TdYHwoZrNTm6wnPDjzOSCp5f+YxPVehfG7Dw+b5 26 | r6G1DR+hHY6ensBQdHP0DjM/lYxWv5WFT5eziGPHvNO/JOJbWR3YQDWzkmwwrdcx 27 | rcsBAfjatY9T5dY1T8KWzFgKgPxu7MqflZKV95mSR2XiOMLqKbrQWMof0fQ9hcTp 28 | qOjADTbJo/Z7mVhM/ON6bdSywCvcsJm5uzZkqu+5sg44RCA+mGAeLZnBTIeg9Uv6 29 | QO/oOpX1cmYw58cWfw/pzHZprUm+1r1G3pfefQmUaXwf7+FkdQ0xvPG8HCl4rPBo 30 | 0lew4wt4og9QCYRev2igEHsOqlMP3bg3/OsIPNcjwUFPIOOutnxTaV8qIWE5AzQH 31 | 7XRyPtMoEiRh6sqvcYBASqZC/eHB3h0GEFjyQCIoiBehyKT24xOALq2199mgu+cx 32 | 5iLgwn9XNUMI75MoCrMIq2Pohe38WeoAJIQKRqP83J61Pwczu/pRFwxVq1tNBWbN 33 | vRHUmhVgzT4Z68YAbeIPSTiLqfh1TR1oi99o31F/F5GWWgYq9hffGHJ5dCLcnWlN 34 | 1+ABwKz9czFsDTjy9F9fPgr/LUUkhmgsotzWnqWah00JmIf2fY9Skv/j+OrtIdkm 35 | tqdcWosDTCObS/cT4TOJKMPQXw== 36 | -----END CERTIFICATE----- 37 | -------------------------------------------------------------------------------- 
/http.proxy/www/demo.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | This is the title of the webpage! 5 | 6 | 7 | 8 |

This is an example paragraph. Anything in the body tag will appear on the page, just like this p tag and its contents.

9 | 10 | 11 | -------------------------------------------------------------------------------- /http.proxy/www/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color: powderblue; 3 | } 4 | h1 { 5 | color: blue; 6 | } 7 | p { 8 | color: red; 9 | } 10 | -------------------------------------------------------------------------------- /mqtt.kafka.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7183" 8 | INPUT='Hello Zilla!' 9 | EXPECTED='Hello Zilla!' 10 | echo \# Testing mqtt.kafka.proxy 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | 18 | for i in $(seq 1 5); do 19 | docker compose -p zilla-mqtt-kafka-proxy exec -T mosquitto-cli \ 20 | mosquitto_pub --url mqtt://zilla.examples.dev:"$PORT"/zilla --message "Test" 21 | 22 | if [ $? -eq 0 ]; then 23 | echo "✅ Zilla is reachable." 24 | break 25 | fi 26 | 27 | sleep 2 28 | done 29 | 30 | OUTPUT=$( 31 | docker compose -p zilla-mqtt-kafka-proxy exec -T mosquitto-cli \ 32 | timeout 5s mosquitto_sub --url mqtt://zilla.examples.dev:"$PORT"/zilla & 33 | 34 | SUB_PID=$! 35 | 36 | sleep 1 37 | 38 | docker compose -p zilla-mqtt-kafka-proxy exec -T mosquitto-cli \ 39 | mosquitto_pub --url mqtt://zilla.examples.dev:"$PORT"/zilla --message "$INPUT" 40 | 41 | wait $SUB_PID 42 | ) 43 | 44 | RESULT=$? 45 | echo RESULT="$RESULT" 46 | 47 | # THEN 48 | echo OUTPUT="$OUTPUT" 49 | echo EXPECTED="$EXPECTED" 50 | echo 51 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 52 | echo ✅ 53 | else 54 | echo ❌ 55 | EXIT=1 56 | fi 57 | 58 | exit $EXIT 59 | -------------------------------------------------------------------------------- /mqtt.kafka.proxy/README.md: -------------------------------------------------------------------------------- 1 | # mqtt.kafka.proxy 2 | 3 | In this guide, you create Kafka topics and use Zilla to mediate MQTT broker messages onto those topics. 4 | 5 | ## Setup 6 | 7 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 8 | 9 | ```bash 10 | docker compose up -d 11 | ``` 12 | 13 | ### Using this example 14 | 15 | Using `mosquitto-cli` subscribe to the `zilla` topic. 16 | 17 | ```bash 18 | docker compose -p zilla-mqtt-kafka-proxy exec -T mosquitto-cli \ 19 | mosquitto_sub --url mqtt://zilla.examples.dev:7183/zilla --debug 20 | ``` 21 | 22 | output: 23 | 24 | ```text 25 | Client null sending CONNECT 26 | Client null received CONNACK (0) 27 | Client null sending SUBSCRIBE (Mid: 1, Topic: zilla, QoS: 0, Options: 0x00) 28 | Client null received SUBACK 29 | Subscribed (mid: 1): 0 30 | Client null received PUBLISH (d0, q0, r0, m0, 'zilla', ... (12 bytes)) 31 | Hello Zilla! 32 | ``` 33 | 34 | In a separate session, publish a valid message on the `zilla` topic. 35 | 36 | ```bash 37 | docker compose -p zilla-mqtt-kafka-proxy exec -T mosquitto-cli \ 38 | mosquitto_pub --url mqtt://zilla.examples.dev:7183/zilla --message 'Hello Zilla!' --debug 39 | ``` 40 | 41 | output: 42 | 43 | ``` 44 | Client null sending CONNECT 45 | Client null received CONNACK (0) 46 | Client null sending PUBLISH (d0, q0, r0, m1, 'zilla', ... 
(12 bytes)) 47 | Client null sending DISCONNECT 48 | ``` 49 | 50 | ## Teardown 51 | 52 | To remove any resources created by the Docker Compose stack, use: 53 | 54 | ```bash 55 | docker compose down 56 | ``` 57 | 58 | -------------------------------------------------------------------------------- /mqtt.kafka.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-mqtt-kafka-proxy 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7183 11 | routes: 12 | - when: 13 | - port: 7183 14 | exit: north_mqtt_server 15 | north_mqtt_server: 16 | type: mqtt 17 | kind: server 18 | exit: north_mqtt_kafka_mapping 19 | north_mqtt_kafka_mapping: 20 | type: mqtt-kafka 21 | kind: proxy 22 | options: 23 | topics: 24 | sessions: mqtt-sessions 25 | messages: mqtt-messages 26 | retained: mqtt-retained 27 | clients: 28 | - place/{identity}/# 29 | routes: 30 | - when: 31 | - publish: 32 | - topic: place/+/device/# 33 | - topic: device/# 34 | - subscribe: 35 | - topic: place/+/device/# 36 | - topic: device/# 37 | with: 38 | messages: mqtt-devices 39 | exit: north_kafka_cache_client 40 | exit: north_kafka_cache_client 41 | north_kafka_cache_client: 42 | type: kafka 43 | kind: cache_client 44 | exit: south_kafka_cache_server 45 | south_kafka_cache_server: 46 | type: kafka 47 | kind: cache_server 48 | options: 49 | bootstrap: 50 | - mqtt-messages 51 | - mqtt-retained 52 | - mqtt-devices 53 | exit: south_kafka_client 54 | south_kafka_client: 55 | type: kafka 56 | kind: client 57 | options: 58 | servers: 59 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 60 | exit: south_tcp_client 61 | south_tcp_client: 62 | type: tcp 63 | kind: client 64 | telemetry: 65 | exporters: 66 | stdout_logs_exporter: 67 | type: stdout 68 | -------------------------------------------------------------------------------- /mqtt.proxy.jwt/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7183" 8 | MESSAGE="Hello, world" 9 | echo \# Testing mqtt.proxy.jwt 10 | echo PORT="$PORT" 11 | echo MESSAGE="$MESSAGE" 12 | echo 13 | 14 | # Generate JWT token without mqtt:stream scope 15 | JWT_TOKEN_NO_SCOPE=$(docker compose run --rm \ 16 | jwt-cli encode \ 17 | --alg "RS256" \ 18 | --kid "example" \ 19 | --iss "https://auth.example.com" \ 20 | --aud "https://api.example.com" \ 21 | --exp=+1d \ 22 | --no-iat \ 23 | --secret @/private.pem | tr -d '\r\n') 24 | 25 | # WHEN 26 | OUTPUT=$( 27 | docker compose -p zilla-mqtt-proxy-jwt exec -T mosquitto-cli \ 28 | timeout 5s mosquitto_sub --url mqtt://zilla.examples.dev:"$PORT"/zilla -u "Bearer $JWT_TOKEN_NO_SCOPE" || echo "Unauthorized" 29 | ) 30 | 31 | RESULT=$? 
32 | echo RESULT="$RESULT" 33 | echo OUTPUT="$OUTPUT" 34 | echo EXPECTED="Unauthorized" 35 | echo 36 | 37 | # THEN 38 | if [ "$RESULT" -ne 0 ] || [ "$OUTPUT" = "Unauthorized" ]; then 39 | echo ✅ 40 | else 41 | echo ❌ 42 | EXIT=1 43 | fi 44 | 45 | # Generate JWT token with mqtt:stream scope 46 | JWT_TOKEN_WITH_SCOPE=$(docker compose run --rm \ 47 | jwt-cli encode \ 48 | --alg "RS256" \ 49 | --kid "example" \ 50 | --iss "https://auth.example.com" \ 51 | --aud "https://api.example.com" \ 52 | --exp=+1d \ 53 | --no-iat \ 54 | --payload "scope=mqtt:stream" \ 55 | --secret @/private.pem | tr -d '\r\n') 56 | 57 | # WHEN 58 | OUTPUT=$( 59 | docker compose -p zilla-mqtt-proxy-jwt exec -T mosquitto-cli \ 60 | timeout 5s mosquitto_sub --url mqtt://zilla.examples.dev:"$PORT"/zilla -u "Bearer $JWT_TOKEN_WITH_SCOPE" & 61 | 62 | SUB_PID=$! 63 | 64 | sleep 1 65 | 66 | # Publish a message 67 | docker compose -p zilla-mqtt-proxy-jwt exec -T mosquitto-cli \ 68 | mosquitto_pub --url mqtt://zilla.examples.dev:"$PORT"/zilla --message "$MESSAGE" -u "Bearer $JWT_TOKEN_WITH_SCOPE" 69 | 70 | wait $SUB_PID 71 | ) 72 | 73 | RESULT=$? 74 | echo RESULT="$RESULT" 75 | echo OUTPUT="$OUTPUT" 76 | echo EXPECTED="$MESSAGE" 77 | echo 78 | 79 | # THEN 80 | if [ "$RESULT" -eq 0 ] && echo "$OUTPUT" | grep -q "$MESSAGE"; then 81 | echo ✅ 82 | else 83 | echo ❌ 84 | EXIT=1 85 | fi 86 | 87 | exit $EXIT 88 | -------------------------------------------------------------------------------- /mqtt.proxy.jwt/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-mqtt-proxy-jwt} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7183:7183 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7183"] 14 | environment: 15 | MOSQUITTO_BROKER_HOST: mosquitto 16 | MOSQUITTO_BROKER_PORT: 1883 17 | KEYSTORE_PASSWORD: generated 18 | volumes: 19 | - ./etc:/etc/zilla 20 | command: start -v -e 21 | 22 | mosquitto: 23 | image: eclipse-mosquitto:2.0 24 | restart: unless-stopped 25 | ports: 26 | - 1883:1883 27 | configs: 28 | - source: mosquitto.conf 29 | target: /mosquitto/config/mosquitto.conf 30 | 31 | mosquitto-cli: 32 | image: eclipse-mosquitto:2.0 33 | command: "/bin/sh" 34 | stdin_open: true 35 | tty: true 36 | 37 | jwt-cli: 38 | image: bitnami/jwt-cli 39 | stdin_open: true 40 | tty: true 41 | profiles: 42 | - on-demand 43 | volumes: 44 | - ./private.pem:/private.pem 45 | 46 | configs: 47 | mosquitto.conf: 48 | content: | 49 | # DO NOT USE IN PRODUCTION 50 | allow_anonymous true 51 | listener 1883 52 | protocol mqtt 53 | 54 | networks: 55 | default: 56 | driver: bridge 57 | -------------------------------------------------------------------------------- /mqtt.proxy.jwt/etc/tls/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/mqtt.proxy.jwt/etc/tls/localhost.p12 -------------------------------------------------------------------------------- /mqtt.proxy.jwt/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-mqtt-proxy-jwt 3 | vaults: 4 | my_servers: 5 | type: filesystem 6 | options: 7 | keys: 8 | store: tls/localhost.p12 9 | type: pkcs12 10 | password: ${{env.KEYSTORE_PASSWORD}} 
11 | guards: 12 | authn_jwt: 13 | type: jwt 14 | options: 15 | issuer: https://auth.example.com 16 | audience: https://api.example.com 17 | keys: 18 | - kty: RSA 19 | n: qqEu50hX+43Bx4W1UYWnAVKwFm+vDbP0kuIOSLVNa+HKQdHTf+3Sei5UCnkskn796izA29D0DdCy3ET9oaKRHIJyKbqFl0rv6f516QzOoXKC6N01sXBHBE/ovs0wwDvlaW+gFGPgkzdcfUlyrWLDnLV7LcuQymhTND2uH0oR3wJnNENN/OFgM1KGPPDOe19YsIKdLqARgxrhZVsh06OurEviZTXOBFI5r+yac7haDwOQhLHXNv+Y9MNvxs5QLWPFIM3bNUWfYrJnLrs4hGJS+y/KDM9Si+HL30QAFXy4YNO33J8DHjZ7ddG5n8/FqplOKvRtUgjcKWlxoGY4VdVaDQ== 20 | e: AQAB 21 | alg: RS256 22 | kid: example 23 | bindings: 24 | north_tcp_server: 25 | type: tcp 26 | kind: server 27 | options: 28 | host: 0.0.0.0 29 | port: 30 | - 7183 31 | - 7883 32 | routes: 33 | - when: 34 | - port: 7183 35 | exit: north_mqtt_server 36 | - when: 37 | - port: 7883 38 | exit: north_tls_server 39 | north_tls_server: 40 | type: tls 41 | kind: server 42 | vault: my_servers 43 | options: 44 | keys: 45 | - localhost 46 | sni: 47 | - localhost 48 | exit: north_mqtt_server 49 | north_mqtt_server: 50 | type: mqtt 51 | kind: server 52 | options: 53 | authorization: 54 | authn_jwt: 55 | credentials: 56 | connect: 57 | username: Bearer {credentials} 58 | routes: 59 | - guarded: 60 | authn_jwt: 61 | - mqtt:stream 62 | exit: north_mqtt_client 63 | north_mqtt_client: 64 | type: mqtt 65 | kind: client 66 | exit: south_tcp_client 67 | south_tcp_client: 68 | type: tcp 69 | kind: client 70 | options: 71 | host: ${{env.MOSQUITTO_BROKER_HOST}} 72 | port: ${{env.MOSQUITTO_BROKER_PORT}} 73 | telemetry: 74 | exporters: 75 | stdout_logs_exporter: 76 | type: stdout 77 | -------------------------------------------------------------------------------- /mqtt.proxy.jwt/private.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAqqEu50hX+43Bx4W1UYWnAVKwFm+vDbP0kuIOSLVNa+HKQdHT 3 | f+3Sei5UCnkskn796izA29D0DdCy3ET9oaKRHIJyKbqFl0rv6f516QzOoXKC6N01 4 | sXBHBE/ovs0wwDvlaW+gFGPgkzdcfUlyrWLDnLV7LcuQymhTND2uH0oR3wJnNENN 5 | /OFgM1KGPPDOe19YsIKdLqARgxrhZVsh06OurEviZTXOBFI5r+yac7haDwOQhLHX 6 | Nv+Y9MNvxs5QLWPFIM3bNUWfYrJnLrs4hGJS+y/KDM9Si+HL30QAFXy4YNO33J8D 7 | HjZ7ddG5n8/FqplOKvRtUgjcKWlxoGY4VdVaDQIDAQABAoIBAQCU5nnO3UPiQs+S 8 | 9SzIynB16BnASpAhziOlNl4utwCsH2roS1pdLkXZ16oBRcNOyEF+5LUcXllL4Q7k 9 | x9PqBLrLfU5w+dNwtrVspmFxEXsUSqDQ45HimU9wBOff8aIUb3CAtSemA47MCajN 10 | sJXBlwmLLJgk25sr9yR810KnDXVQ/RQXWGnRiyVvgHjYyrhQEam22z95+60yJPbQ 11 | hFphWPGDj4O1kOkuQil/ciD5Br1IcrftNp32UZE1c5TyBL9kyLH0iYGELt+UR4ht 12 | kbkBYF4ai0tiDRSpi1M0yd+5EDbi7kK5qI/XKZiDFptKXFkb49hln6mYpzuumzxh 13 | W7MqNswlAoGBANLAXpLmcLm5IqvjGRT3V34O1C1ztQzR9h8Vor0hpfVZEcUTjQ8i 14 | HwzkIfIBQqfOpxCrutbPSC9gOQIrjPS4O04B6qI/n1xY8n2bm7xEN0w68cjeS+Aa 15 | duB8NmXGW3iTCWkZ8LntKKiijY/qM4DNTJwOHNnx/gEoccSJBX7eQdVrAoGBAM9D 16 | lEv2A6+AwQP19kiN+PVGtzQx/VD8URCe8b5FOxEY42Cfic13ZUK2o35cZKrWgw5T 17 | gO+PtrLGg6iHYv6Igib2urHJrX8bv4GhuUk3e4ZUDfBvQEctb+3ziDIH+dz1idxG 18 | 3NBm3YIyROrvV4sZknziPAxSSs0nsRO99LKC7HRnAoGABfL8OHVc1UFOoz+D472f 19 | sKVGhAnyIGyE4BfCQkiC4Mwk3kVRBA5YhgqikqxKb2Y7/RJ78bhkN0ImGdOU5QuW 20 | UPto3i+hlf/EyJrt6ICcdwBq9tqflbHpjSi0eGcDCgJMj7T1wKnbLfd4u4lG7unc 21 | scMMOXLFUs8LdxrPFue7QAUCgYAmVIEgaybTViRb7yjU0iywb5uB85y9VWxIfpyG 22 | a5c42jlyrQ53CRWY+N3TiJK1ZWZnR9oYc6N9/GhsylzsZaJsQkTUjE+hqigIeTgi 23 | 6jfV58WMKYbhou2IO/l8By2WR3JvYkuD3wIoCdPk/s5Z0yjcH5qrTKy4tBZzaDXQ 24 | rZW9EQKBgEpfuGRtsUE+FUEqLcqfI3w5aYMKb0pEsG4zbvxB/4QeHdAawNEqeSrS 25 | TC0boMRlZ3Mf6GpZ9No/qVvfsW45PghmT77b4nsR2Sh0mdPMbBzdoqOOTwgNAoKP 26 | 
RHsZtVdb9Gk7Jf2UHTEZY5NLKyXGH/qj4/7ajxixtHNvlD3oI14F 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /openapi.asyncapi.kakfa.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT='{"id": 1, "name": "Spike"}' 9 | EXPECTED='[{"id": 1, "name": "Spike"}]' 10 | echo \# Testing openapi.asyncapi.kakfa.proxy/ 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | curl "http://localhost:$PORT/pets" --header 'Content-Type: application/json' --header 'Idempotency-Key: 1' --data "$INPUT" 17 | 18 | # WHEN 19 | OUTPUT=$(curl "http://localhost:$PORT/pets") 20 | RESULT=$? 21 | echo RESULT="$RESULT" 22 | 23 | # THEN 24 | echo OUTPUT="$OUTPUT" 25 | echo EXPECTED="$EXPECTED" 26 | echo 27 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 28 | echo ✅ 29 | else 30 | echo ❌ 31 | EXIT=1 32 | fi 33 | 34 | exit $EXIT 35 | -------------------------------------------------------------------------------- /openapi.asyncapi.kakfa.proxy/README.md: -------------------------------------------------------------------------------- 1 | # openapi.asyncapi.kakfa.proxy 2 | 3 | ## Setup 4 | 5 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 6 | 7 | ```bash 8 | docker compose up -d 9 | ``` 10 | 11 | ### Test 12 | 13 | #### Create Pet 14 | 15 | ```bash 16 | curl 'http://localhost:7114/pets' \ 17 | --header 'Content-Type: application/json' \ 18 | --header 'Idempotency-Key: 1' \ 19 | --data '{ "id": 1, "name": "Spike" }' 20 | ``` 21 | 22 | #### Retrieve Pets 23 | 24 | ```bash 25 | curl 'http://localhost:7114/pets' --header 'Accept: application/json' 26 | ``` 27 | 28 | ## Teardown 29 | 30 | To remove any resources created by the Docker Compose stack, use: 31 | 32 | ```bash 33 | docker compose down 34 | ``` 35 | 36 | -------------------------------------------------------------------------------- /openapi.asyncapi.kakfa.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-openapi-proxy 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | http: 9 | path: specs/http-openapi.yaml 10 | kafka: 11 | path: specs/kafka-asyncapi.yaml 12 | bindings: 13 | north_openapi_server: 14 | type: openapi 15 | kind: server 16 | options: 17 | specs: 18 | my-openapi-spec: 19 | catalog: 20 | host_filesystem: 21 | subject: http 22 | exit: north_openapi_asyncapi_proxy 23 | north_openapi_asyncapi_proxy: 24 | type: openapi-asyncapi 25 | kind: proxy 26 | options: 27 | specs: 28 | openapi: 29 | my-openapi-spec: 30 | catalog: 31 | host_filesystem: 32 | subject: http 33 | asyncapi: 34 | my-asyncapi-spec: 35 | catalog: 36 | host_filesystem: 37 | subject: kafka 38 | routes: 39 | - exit: south_asyncapi_client 40 | when: 41 | - api-id: my-openapi-spec 42 | with: 43 | api-id: my-asyncapi-spec 44 | south_asyncapi_client: 45 | type: asyncapi 46 | kind: client 47 | options: 48 | specs: 49 | my-asyncapi-spec: 50 | catalog: 51 | host_filesystem: 52 | subject: kafka 53 | telemetry: 54 | exporters: 55 | stdout_logs_exporter: 56 | type: stdout 57 | -------------------------------------------------------------------------------- /openapi.asyncapi.kakfa.proxy/http-openapi.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
openapi: 3.0.0 3 | info: 4 | version: 1.0.0 5 | title: Swagger Petstore 6 | license: 7 | name: MIT 8 | servers: 9 | - url: http://localhost:7114 10 | paths: 11 | "/pets": 12 | get: 13 | summary: List all pets 14 | operationId: listPets 15 | tags: 16 | - pets 17 | responses: 18 | "200": 19 | description: A paged array of pets 20 | content: 21 | application/json: 22 | schema: 23 | "$ref": "#/components/schemas/Pets" 24 | default: 25 | description: unexpected error 26 | content: 27 | application/json: 28 | schema: 29 | "$ref": "#/components/schemas/Error" 30 | post: 31 | summary: Create a pet 32 | operationId: createPet 33 | tags: 34 | - pets 35 | requestBody: 36 | content: 37 | application/json: 38 | schema: 39 | "$ref": "#/components/schemas/Pet" 40 | required: true 41 | responses: 42 | "201": 43 | description: Null response 44 | default: 45 | description: unexpected error 46 | content: 47 | application/json: 48 | schema: 49 | "$ref": "#/components/schemas/Error" 50 | components: 51 | schemas: 52 | Pet: 53 | type: object 54 | required: 55 | - id 56 | - name 57 | properties: 58 | id: 59 | type: integer 60 | format: int64 61 | name: 62 | type: string 63 | tag: 64 | type: string 65 | Pets: 66 | type: array 67 | maxItems: 100 68 | items: 69 | "$ref": "#/components/schemas/Pet" 70 | Error: 71 | type: object 72 | required: 73 | - code 74 | - message 75 | properties: 76 | code: 77 | type: integer 78 | format: int32 79 | message: 80 | type: string 81 | -------------------------------------------------------------------------------- /openapi.proxy/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | EXPECTED='[{"id":1,"name":"string","tag":"string"}]' 9 | echo \# Testing openapi.proxy/ 10 | echo PORT="$PORT" 11 | echo EXPECTED="$EXPECTED" 12 | echo 13 | 14 | # WHEN 15 | OUTPUT=$(curl --silent --location "http://localhost:$PORT/pets" --header 'Accept: application/json') 16 | RESULT=$? 17 | echo RESULT="$RESULT" 18 | 19 | # THEN 20 | echo OUTPUT="$OUTPUT" 21 | echo EXPECTED="$EXPECTED" 22 | echo 23 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 24 | echo ✅ 25 | else 26 | echo ❌ 27 | EXIT=1 28 | fi 29 | 30 | exit $EXIT 31 | -------------------------------------------------------------------------------- /openapi.proxy/README.md: -------------------------------------------------------------------------------- 1 | # openapi.proxy 2 | 3 | This example demonstrates creating an HTTP request proxy where the available endpoints are defined in an OpenAPI schema [petstore-openapi.yaml](./petstore-openapi.yaml). 
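Requests are matched against the operations in that schema before being forwarded to the `openapi-mock` backend defined in [compose.yaml](compose.yaml). The compose file also maps the mock to host port `8000`, so once the stack is up (see Setup below) you can compare Zilla's proxied answer with the mock's direct answer; a minimal sketch, assuming the mock responds on the mapped port:

```bash
# Through Zilla (port 7114): the request is validated against the listPets operation in the spec
curl 'http://localhost:7114/pets' --header 'Accept: application/json'

# Directly against the open-api-mocker backend (host port 8000 maps to the mock's internal port 5000)
curl 'http://localhost:8000/pets' --header 'Accept: application/json'
```

Both commands should return the same mocked `Pets` array.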
4 | 5 | ## Setup 6 | 7 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 8 | 9 | ```bash 10 | docker compose up -d 11 | ``` 12 | 13 | ## Test 14 | 15 | ```bash 16 | curl 'http://localhost:7114/pets' --header 'Accept: application/json' 17 | ``` 18 | 19 | ## Teardown 20 | 21 | To remove any resources created by the Docker Compose stack, use: 22 | 23 | ```bash 24 | docker compose down 25 | ``` 26 | -------------------------------------------------------------------------------- /openapi.proxy/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-openapi-proxy} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | volumes: 15 | - ./petstore-openapi.yaml:/etc/zilla/specs/petstore-openapi.yaml 16 | - ./etc:/etc/zilla 17 | command: start -v -e 18 | 19 | openapi-mock: 20 | image: jormaechea/open-api-mocker:2.0.0 21 | volumes: 22 | - ./petstore-openapi.yaml:/app/petstore-openapi.yaml 23 | ports: 24 | - "8000:5000" 25 | command: open-api-mocker -s /app/petstore-openapi.yaml 26 | 27 | networks: 28 | default: 29 | driver: bridge 30 | -------------------------------------------------------------------------------- /openapi.proxy/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: zilla-openapi-proxy 3 | catalogs: 4 | host_filesystem: 5 | type: filesystem 6 | options: 7 | subjects: 8 | petstore: 9 | path: specs/petstore-openapi.yaml 10 | bindings: 11 | north_openapi_server: 12 | type: openapi 13 | kind: server 14 | options: 15 | specs: 16 | my-openapi-spec: 17 | catalog: 18 | host_filesystem: 19 | subject: petstore 20 | exit: south_openapi_client 21 | south_openapi_client: 22 | type: openapi 23 | kind: client 24 | options: 25 | specs: 26 | my-openapi-spec: 27 | catalog: 28 | host_filesystem: 29 | subject: petstore 30 | tcp: 31 | host: openapi-mock 32 | port: 33 | - 5000 34 | telemetry: 35 | exporters: 36 | stdout_logs_exporter: 37 | type: stdout 38 | -------------------------------------------------------------------------------- /openapi.proxy/petstore-openapi.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | openapi: 3.0.0 3 | info: 4 | version: 1.0.0 5 | title: Swagger Petstore 6 | license: 7 | name: MIT 8 | servers: 9 | - url: http://localhost:7114 10 | paths: 11 | "/pets": 12 | get: 13 | summary: List all pets 14 | operationId: listPets 15 | tags: 16 | - pets 17 | responses: 18 | "200": 19 | description: A paged array of pets 20 | content: 21 | application/json: 22 | schema: 23 | "$ref": "#/components/schemas/Pets" 24 | default: 25 | description: unexpected error 26 | content: 27 | application/json: 28 | schema: 29 | "$ref": "#/components/schemas/Error" 30 | post: 31 | summary: Create a pet 32 | operationId: createPets 33 | tags: 34 | - pets 35 | requestBody: 36 | content: 37 | application/json: 38 | schema: 39 | "$ref": "#/components/schemas/Pet" 40 | required: true 41 | responses: 42 | "201": 43 | description: Null response 44 | default: 45 | description: unexpected error 46 | content: 47 | application/json: 48 | schema: 49 | "$ref": "#/components/schemas/Error" 50 | components: 51 | schemas: 52 | Pet: 53 | type: 
object 54 | required: 55 | - id 56 | - name 57 | properties: 58 | id: 59 | type: integer 60 | format: int64 61 | name: 62 | type: string 63 | tag: 64 | type: string 65 | Pets: 66 | type: array 67 | maxItems: 100 68 | items: 69 | "$ref": "#/components/schemas/Pet" 70 | Error: 71 | type: object 72 | required: 73 | - code 74 | - message 75 | properties: 76 | code: 77 | type: integer 78 | format: int32 79 | message: 80 | type: string 81 | -------------------------------------------------------------------------------- /sse.jwt/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | PORT="7143" 6 | STREAM_URL="https://localhost:$PORT/events" 7 | NC_PORT="7001" 8 | MESSAGE="Hello, world" 9 | 10 | echo \# Testing sse.jwt 11 | echo "PORT=$PORT" 12 | echo "STREAM_URL=$STREAM_URL" 13 | echo "NC_PORT=$NC_PORT" 14 | echo "MESSAGE=$MESSAGE" 15 | 16 | JWT_TOKEN=$(docker compose run --rm \ 17 | jwt-cli encode \ 18 | --alg "RS256" \ 19 | --kid "example" \ 20 | --iss "https://auth.example.com" \ 21 | --aud "https://api.example.com" \ 22 | --exp=+1d \ 23 | --no-iat \ 24 | --payload "scope=proxy:stream" \ 25 | --secret @/private.pem | tr -d '\r\n') 26 | 27 | OUTPUT=$(curl --cacert test-ca.crt --no-buffer -N --max-time 5 "$STREAM_URL?access_token=${JWT_TOKEN}" & 28 | 29 | sleep 1 30 | 31 | echo '{ "data": "Hello, world" }' | timeout 5s nc localhost $NC_PORT 32 | ) 33 | 34 | if echo "$OUTPUT" | grep -q "$MESSAGE"; then 35 | echo ✅ 36 | else 37 | echo ❌ 38 | EXIT=1 39 | fi 40 | 41 | exit $EXIT 42 | -------------------------------------------------------------------------------- /sse.jwt/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-sse-proxy-jwt} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7143:7143 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7143"] 14 | environment: 15 | KEYSTORE_PASSWORD: generated 16 | volumes: 17 | - ./etc:/etc/zilla 18 | - ./www:/var/www/ 19 | command: start -v -e 20 | 21 | sse-server: 22 | image: ghcr.io/aklivity/extras-sse-server:sha-42ad67e 23 | restart: unless-stopped 24 | ports: 25 | - 8001:8001 26 | - 7001:7001 27 | stdin_open: true 28 | tty: true 29 | healthcheck: 30 | interval: 5s 31 | timeout: 3s 32 | retries: 5 33 | test: netstat -an | grep 8001 > /dev/null; if [ 0 != $? 
]; then exit 1; fi; 34 | command: -v -p 8001 -i 7001 35 | 36 | jwt-cli: 37 | image: bitnami/jwt-cli 38 | stdin_open: true 39 | tty: true 40 | profiles: 41 | - on-demand 42 | volumes: 43 | - ./private.pem:/private.pem 44 | 45 | networks: 46 | default: 47 | driver: bridge 48 | -------------------------------------------------------------------------------- /sse.jwt/etc/tls/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/sse.jwt/etc/tls/localhost.p12 -------------------------------------------------------------------------------- /sse.jwt/private.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAqqEu50hX+43Bx4W1UYWnAVKwFm+vDbP0kuIOSLVNa+HKQdHT 3 | f+3Sei5UCnkskn796izA29D0DdCy3ET9oaKRHIJyKbqFl0rv6f516QzOoXKC6N01 4 | sXBHBE/ovs0wwDvlaW+gFGPgkzdcfUlyrWLDnLV7LcuQymhTND2uH0oR3wJnNENN 5 | /OFgM1KGPPDOe19YsIKdLqARgxrhZVsh06OurEviZTXOBFI5r+yac7haDwOQhLHX 6 | Nv+Y9MNvxs5QLWPFIM3bNUWfYrJnLrs4hGJS+y/KDM9Si+HL30QAFXy4YNO33J8D 7 | HjZ7ddG5n8/FqplOKvRtUgjcKWlxoGY4VdVaDQIDAQABAoIBAQCU5nnO3UPiQs+S 8 | 9SzIynB16BnASpAhziOlNl4utwCsH2roS1pdLkXZ16oBRcNOyEF+5LUcXllL4Q7k 9 | x9PqBLrLfU5w+dNwtrVspmFxEXsUSqDQ45HimU9wBOff8aIUb3CAtSemA47MCajN 10 | sJXBlwmLLJgk25sr9yR810KnDXVQ/RQXWGnRiyVvgHjYyrhQEam22z95+60yJPbQ 11 | hFphWPGDj4O1kOkuQil/ciD5Br1IcrftNp32UZE1c5TyBL9kyLH0iYGELt+UR4ht 12 | kbkBYF4ai0tiDRSpi1M0yd+5EDbi7kK5qI/XKZiDFptKXFkb49hln6mYpzuumzxh 13 | W7MqNswlAoGBANLAXpLmcLm5IqvjGRT3V34O1C1ztQzR9h8Vor0hpfVZEcUTjQ8i 14 | HwzkIfIBQqfOpxCrutbPSC9gOQIrjPS4O04B6qI/n1xY8n2bm7xEN0w68cjeS+Aa 15 | duB8NmXGW3iTCWkZ8LntKKiijY/qM4DNTJwOHNnx/gEoccSJBX7eQdVrAoGBAM9D 16 | lEv2A6+AwQP19kiN+PVGtzQx/VD8URCe8b5FOxEY42Cfic13ZUK2o35cZKrWgw5T 17 | gO+PtrLGg6iHYv6Igib2urHJrX8bv4GhuUk3e4ZUDfBvQEctb+3ziDIH+dz1idxG 18 | 3NBm3YIyROrvV4sZknziPAxSSs0nsRO99LKC7HRnAoGABfL8OHVc1UFOoz+D472f 19 | sKVGhAnyIGyE4BfCQkiC4Mwk3kVRBA5YhgqikqxKb2Y7/RJ78bhkN0ImGdOU5QuW 20 | UPto3i+hlf/EyJrt6ICcdwBq9tqflbHpjSi0eGcDCgJMj7T1wKnbLfd4u4lG7unc 21 | scMMOXLFUs8LdxrPFue7QAUCgYAmVIEgaybTViRb7yjU0iywb5uB85y9VWxIfpyG 22 | a5c42jlyrQ53CRWY+N3TiJK1ZWZnR9oYc6N9/GhsylzsZaJsQkTUjE+hqigIeTgi 23 | 6jfV58WMKYbhou2IO/l8By2WR3JvYkuD3wIoCdPk/s5Z0yjcH5qrTKy4tBZzaDXQ 24 | rZW9EQKBgEpfuGRtsUE+FUEqLcqfI3w5aYMKb0pEsG4zbvxB/4QeHdAawNEqeSrS 25 | TC0boMRlZ3Mf6GpZ9No/qVvfsW45PghmT77b4nsR2Sh0mdPMbBzdoqOOTwgNAoKP 26 | RHsZtVdb9Gk7Jf2UHTEZY5NLKyXGH/qj4/7ajxixtHNvlD3oI14F 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /sse.jwt/test-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFXjCCA0YCCQCuorYrG5wG+DANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV 3 | UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREwDwYD 4 | VQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMMB1Rl 5 | c3QgQ0EwHhcNMjExMjIxMjMwNDExWhcNMzExMjE5MjMwNDExWjBxMQswCQYDVQQG 6 | EwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREw 7 | DwYDVQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMM 8 | B1Rlc3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGPVgVO/zd 9 | ebwGWujKymJmztWZ5LIaZC+zY1SwKUBUA3+vrtO79ndi6WePiV0a2e7wov/ajFLp 10 | mor2RfGSMD8Yb9e98QSqnfy9Q5+ABmxFulgSJNwDjnxugZuk/6MILKMg7AsgqaxK 11 | wROSSLcom8b+gkbwXgHm57RKiitXlRM9ujdKibeHwfu7JTk6A7LwRbCVurTRqckw 12 | 
Q0/mA4mNuZ2AMGW+YL36TwTLfTAa4AVHEbI3U5+TyY3DoV7OoHI4Ec1/7B0CGzqK 13 | smKM3dKmXpRIc5NBZt+eKqphAhp0CD1eAnutWtepahjWyY1fAYk9hZ+ayU52dAMf 14 | +TbkPdMn5jfHhqs95VdfQjsKZPyNTYjhjHN9tAph1wKUG4XRATAvxhA2gpYgN9de 15 | 9ztWPboVzGosauQxPrXklO8CF7hsft0RlCCP9ojVLUkZ42vI/M1S3lD8pCDtPe46 16 | 2zQ9S3F1R7goF3AqWm4EQqu237+zL45pCbbWyyHeXHeDrv3DNXHcWXoFicNmCBl6 17 | nPPsVn9qgdhmJf5QcUKLkJEEtk94Uedv5qEqiJQYSPAIZHKnv4L5Li69kghTbUv/ 18 | Xquz2JdY5daj5eRurgZVjutkmMIaR4rJdhifBonlcKxoeSZoVbnoGzS5KcF9saz8 19 | 9qYU9LtF98CUMY7U4RPlVbA8D4YwDICgcwIDAQABMA0GCSqGSIb3DQEBCwUAA4IC 20 | AQDEzoEbCsHpae5M1I+vezg7w5NAItMU4WU9IccTylSB/gfIT+hWwIv9KiqTWjxw 21 | Y5Aj6XJ1mATHAMSQnNZCnP2Hw39/Nc3HcKmek2na2zK/TBSEFXudJmox8SK32r26 22 | nLstNlcYf7ixqJ5T7SOE2GJOcEUWpvTSbvQD0NvG81BVnSyUfX3FgkQLwwlyBoSE 23 | 7FwFz+ybrbisUHHqzPVnSblEDbKv6T9ai3FjbBegzPVSd9RmtB/DzxhdSk+kL1oD 24 | VSEPweSHEqamEnq2RIgLb7rYhmfohl0fGF5W6I3LvLqqe0KLRRID9V/jwBUGyICG 25 | W3jGu+68jOIUqXA4+gfOwXNktd4F7So48ySbghgrY0Umr4KSs6CTHhvSZ4ZG8QO/ 26 | ZyC+DjXsU3mihIBP/Q43YU7dYxFSdlCw79YnXvdWu7K7lZ1bIcbdH+RShcbvPcwg 27 | iM2qAvCgZBA8xHMDQeev8QdQjxtN+uBfee0mkvbzPbIh/0prywPHjAie/bXVBPVt 28 | VK6Gej2egPCIA5ThvGpmXh8kPd5Aqy1J++cmrzfYfPPsbmPGTLI0HFMhUuzIhFbd 29 | TzAV/Qj83r722s6f0v3KEEhfi3EZu3bRSGIyxVtebtOLGvEb2PjJrktyVJgivVFX 30 | uHHpz76QFOcLy1F962Hfj51NnIROOySyl12JkhPRTlMoiQ== 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /sse.kafka.fanout/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | KAFKA_BOOTSTRAP_SERVER="kafka.examples.dev:29092" 9 | INPUT='{"id":1,"name":"Hello World!"}' 10 | EXPECTED='data:{"id":1,"name":"Hello World!"}' 11 | echo \# Testing sse.kafka.fanout 12 | echo PORT="$PORT" 13 | echo KAFKA_BOOTSTRAP_SERVER="$KAFKA_BOOTSTRAP_SERVER" 14 | echo INPUT="$INPUT" 15 | echo EXPECTED="$EXPECTED" 16 | echo 17 | 18 | # WHEN 19 | 20 | # Ensure topic exists 21 | docker compose -p zilla-sse-kafka-fanout exec -T kafka \ 22 | kafka-topics.sh --describe --topic events --bootstrap-server $KAFKA_BOOTSTRAP_SERVER 23 | 24 | # push messages to sse server 25 | echo "$INPUT" | 26 | docker compose -p zilla-sse-kafka-fanout exec -T kafkacat \ 27 | kafkacat -P \ 28 | -b $KAFKA_BOOTSTRAP_SERVER \ 29 | -t events \ 30 | -k "1" 31 | 32 | sleep 5 33 | # send request to zilla 34 | OUTPUT=$(timeout 3s curl -N --http2 -H "Accept:text/event-stream" "http://localhost:$PORT/events" | grep "^data:") 35 | 36 | # THEN 37 | echo OUTPUT="$OUTPUT" 38 | echo EXPECTED="$EXPECTED" 39 | echo 40 | if [ "$OUTPUT" = "$EXPECTED" ]; then 41 | echo ✅ 42 | else 43 | echo ❌ 44 | EXIT=1 45 | fi 46 | 47 | exit $EXIT 48 | -------------------------------------------------------------------------------- /sse.kafka.fanout/README.md: -------------------------------------------------------------------------------- 1 | # sse.kafka.fanout 2 | 3 | Listens on http port `7114` or https port `7114` and will stream back whatever is published to the `events` topic in Kafka. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - sse-cat 9 | 10 | ### Install sse-cat client 11 | 12 | Requires Server-Sent Events client, such as `sse-cat` version `2.0.5` or higher on `node` version `14` or higher. 
13 | 14 | ```bash 15 | npm install -g sse-cat 16 | ``` 17 | 18 | ## Setup 19 | 20 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 21 | 22 | ```bash 23 | docker compose up -d 24 | ``` 25 | 26 | ### Verify behavior 27 | 28 | Open a `text/event-stream` from the sse endpoint in a terminal. 29 | 30 | ```bash 31 | curl -N --http2 -H "Accept:text/event-stream" "http://localhost:7114/events" 32 | ``` 33 | 34 | In a new terminal send a text payload from the `kafkacat` producer client. 35 | 36 | ```bash 37 | echo '{ "id": 1, "name": "Hello World!"}' | docker compose -p zilla-sse-kafka-fanout exec -T kafkacat \ 38 | kafkacat -P -b kafka.examples.dev:29092 -t events -k "1" 39 | ``` 40 | 41 | The text payload will be the `data:` of the sse message seen in the `text/event-stream` terminal session. 42 | 43 | Note that only the latest messages with distinct keys are guaranteed to be retained by a compacted Kafka topic, so use different values for `-k` above to retain more than one message in the `events` topic. 44 | 45 | ### Browser 46 | 47 | Browse to [http://localhost:7114/index.html]() and make sure to visit the `localhost` site and trust the `localhost` certificate. 48 | 49 | Click the `Go` button to attach the browser SSE event source to Kafka via Zilla. 50 | 51 | All non-compacted messages with distinct keys in the `events` Kafka topic are replayed to the browser. 52 | 53 | Open the browser developer tools console to see additional logging, such as the `open` event. 54 | 55 | Additional messages produced to the `events` Kafka topic then arrive at the browser live. 56 | 57 | ### Reliability 58 | 59 | Simulate connection loss by stopping the `zilla` service in the `docker` stack. 60 | 61 | ```bash 62 | docker compose -p zilla-sse-kafka-fanout stop zilla 63 | ``` 64 | 65 | This causes errors to be logged in the browser console during repeated attempts to automatically reconnect. 66 | 67 | Simulate connection recovery by starting the `zilla` service again. 68 | 69 | ```bash 70 | docker compose -p zilla-sse-kafka-fanout start zilla 71 | ``` 72 | 73 | Any messages produced to the `events` Kafka topic while the browser was attempting to reconnect are now delivered immediately. 74 | 75 | Additional messages produced to the `events` Kafka topic then arrive at the browser live. 
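At any point in this walkthrough, the `sse-cat` client installed above can stand in for `curl` to follow the stream from the command line. A minimal sketch, assuming `sse-cat`'s default `sse-cat <url>` invocation:

```bash
# Follow the events stream through Zilla with sse-cat instead of curl
sse-cat http://localhost:7114/events
```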
76 | 77 | ## Teardown 78 | 79 | To remove any resources created by the Docker Compose stack, use: 80 | 81 | ```bash 82 | docker compose down 83 | ``` 84 | -------------------------------------------------------------------------------- /sse.kafka.fanout/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-sse-kafka-fanout} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | environment: 15 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 16 | volumes: 17 | - ./etc:/etc/zilla 18 | - ./www:/var/www 19 | command: start -v -e 20 | 21 | kafka: 22 | image: bitnami/kafka:3.5 23 | restart: unless-stopped 24 | hostname: kafka.examples.dev 25 | ports: 26 | - 9092:9092 27 | healthcheck: 28 | test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka.examples.dev:29092 || exit 1 29 | interval: 1s 30 | timeout: 60s 31 | retries: 60 32 | environment: 33 | ALLOW_PLAINTEXT_LISTENER: "yes" 34 | KAFKA_CFG_NODE_ID: "1" 35 | KAFKA_CFG_BROKER_ID: "1" 36 | KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0" 37 | KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093" 38 | KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" 39 | KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" 40 | KAFKA_CFG_LOG_DIRS: "/tmp/logs" 41 | KAFKA_CFG_PROCESS_ROLES: "broker,controller" 42 | KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093" 43 | KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL" 44 | KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka.examples.dev:29092" 45 | KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" 46 | 47 | kafka-init: 48 | image: bitnami/kafka:3.5 49 | user: root 50 | depends_on: 51 | kafka: 52 | condition: service_healthy 53 | restart: true 54 | deploy: 55 | restart_policy: 56 | condition: none 57 | max_attempts: 0 58 | entrypoint: ["/bin/sh", "-c"] 59 | environment: 60 | KAFKA_BOOTSTRAP_SERVER: kafka.examples.dev:29092 61 | command: 62 | - | 63 | echo -e "Creating kafka topic"; 64 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --create --if-not-exists --topic events --config cleanup.policy=compact 65 | echo -e "Successfully created the following topics:"; 66 | /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server $${KAFKA_BOOTSTRAP_SERVER} --list; 67 | 68 | kafka-ui: 69 | image: ghcr.io/kafbat/kafka-ui:v1.0.0 70 | restart: unless-stopped 71 | ports: 72 | - 8080:8080 73 | depends_on: 74 | kafka: 75 | condition: service_healthy 76 | restart: true 77 | environment: 78 | KAFKA_CLUSTERS_0_NAME: local 79 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka.examples.dev:29092 80 | 81 | kafkacat: 82 | image: confluentinc/cp-kafkacat:7.1.9 83 | command: "bash" 84 | stdin_open: true 85 | tty: true 86 | 87 | networks: 88 | default: 89 | driver: bridge 90 | -------------------------------------------------------------------------------- /sse.kafka.fanout/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: 
north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | options: 19 | access-control: 20 | policy: cross-origin 21 | routes: 22 | - when: 23 | - headers: 24 | :scheme: http 25 | :path: /events 26 | exit: north_sse_server 27 | - when: 28 | - headers: 29 | :scheme: http 30 | exit: east_http_filesystem_mapping 31 | east_http_filesystem_mapping: 32 | type: http-filesystem 33 | kind: proxy 34 | routes: 35 | - exit: east_filesystem_server 36 | when: 37 | - path: /{path} 38 | with: 39 | path: ${params.path} 40 | east_filesystem_server: 41 | type: filesystem 42 | kind: server 43 | options: 44 | location: /var/www/ 45 | north_sse_server: 46 | type: sse 47 | kind: server 48 | exit: north_sse_kafka_mapping 49 | north_sse_kafka_mapping: 50 | type: sse-kafka 51 | kind: proxy 52 | routes: 53 | - when: 54 | - path: /{topic} 55 | exit: north_kafka_cache_client 56 | with: 57 | topic: ${params.topic} 58 | north_kafka_cache_client: 59 | type: kafka 60 | kind: cache_client 61 | exit: south_kafka_cache_server 62 | south_kafka_cache_server: 63 | type: kafka 64 | kind: cache_server 65 | options: 66 | bootstrap: 67 | - events 68 | exit: south_kafka_client 69 | south_kafka_client: 70 | type: kafka 71 | kind: client 72 | options: 73 | servers: 74 | - ${{env.KAFKA_BOOTSTRAP_SERVER}} 75 | exit: south_tcp_client 76 | south_tcp_client: 77 | type: tcp 78 | kind: client 79 | telemetry: 80 | exporters: 81 | stdout_logs_exporter: 82 | type: stdout 83 | -------------------------------------------------------------------------------- /sse.kafka.fanout/www/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Welcome to Zilla! 5 | 24 | 25 | 26 | 27 | 28 |

[index.html markup not preserved in this dump; the page renders a Location input with a Go button and a Messages list populated by the browser's SSE EventSource, as described in the README above.]
37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /tcp.echo/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="12345" 8 | INPUT="Hello, Zilla!" 9 | EXPECTED="Hello, Zilla!" 10 | echo \# Testing tcp.echo/ 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | OUTPUT=$(echo "$INPUT" | nc -w 1 localhost $PORT) 18 | RESULT=$? 19 | echo RESULT="$RESULT" 20 | 21 | # THEN 22 | echo OUTPUT="$OUTPUT" 23 | echo EXPECTED="$EXPECTED" 24 | echo 25 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 26 | echo ✅ 27 | else 28 | echo ❌ 29 | EXIT=1 30 | fi 31 | 32 | exit $EXIT 33 | -------------------------------------------------------------------------------- /tcp.echo/README.md: -------------------------------------------------------------------------------- 1 | # tcp.echo 2 | 3 | Listens on tcp port `12345` and will echo back whatever is sent to the server. 4 | 5 | ## Requirements 6 | 7 | - nc 8 | - docker compose 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | ```bash 21 | nc localhost 12345 22 | ``` 23 | 24 | Type a `Hello, world` message and press `enter`. 25 | 26 | output: 27 | 28 | ```text 29 | Hello, world 30 | Hello, world 31 | ``` 32 | 33 | ## Teardown 34 | 35 | To remove any resources created by the Docker Compose stack, use: 36 | 37 | ```bash 38 | docker compose down 39 | ``` 40 | -------------------------------------------------------------------------------- /tcp.echo/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-tcp-echo} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 12345:12345 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/12345"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | networks: 19 | default: 20 | driver: bridge 21 | -------------------------------------------------------------------------------- /tcp.echo/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 12345 10 | exit: north_echo_server 11 | north_echo_server: 12 | type: echo 13 | kind: server 14 | telemetry: 15 | exporters: 16 | stdout_logs_exporter: 17 | type: stdout 18 | -------------------------------------------------------------------------------- /tcp.reflect/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="12345" 8 | INPUT="Hello!" 9 | INPUT1="Hello from client 1" 10 | INPUT2="Hello from client 2" 11 | 12 | echo \# Testing tcp.reflect 13 | echo PORT="$PORT" 14 | echo INPUT1="$INPUT1" 15 | echo INPUT2="$INPUT2" 16 | 17 | # WHEN 18 | 19 | echo "$INPUT" | nc -w 1 localhost $PORT 20 | 21 | { 22 | (echo "$INPUT1"; sleep 2) | nc -w 1 localhost $PORT & 23 | PID1=$! 24 | (echo "$INPUT2"; sleep 2) | nc -w 1 localhost $PORT & 25 | PID2=$! 
26 | 27 | wait $PID1 $PID2 28 | } > output.out 2>&1 29 | 30 | RESULT1=$? 31 | RESULT2=$? 32 | OUTPUT=$(cat output.out) 33 | 34 | # THEN 35 | COUNT1=$(echo "$OUTPUT" | grep -Fx "$INPUT1" | wc -l) 36 | COUNT2=$(echo "$OUTPUT" | grep -Fx "$INPUT2" | wc -l) 37 | 38 | if [ "$RESULT1" -eq 0 ] && [ "$RESULT2" -eq 0 ] && [ "$COUNT1" -eq 2 ] && [ "$COUNT2" -eq 2 ]; then 39 | echo ✅ 40 | else 41 | echo ❌ 42 | EXIT=1 43 | fi 44 | 45 | exit $EXIT 46 | -------------------------------------------------------------------------------- /tcp.reflect/README.md: -------------------------------------------------------------------------------- 1 | # tcp.reflect 2 | 3 | Listens on tcp port `12345` and will echo back whatever is sent to the server, broadcasting to all clients. 4 | 5 | ## Requirements 6 | 7 | - nc 8 | - docker compose 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | Connect each client first, then send `Hello, one` from first client, then send `Hello, two` from second client. 21 | 22 | ```bash 23 | nc localhost 12345 24 | ``` 25 | 26 | Type a `Hello, one` message and press `enter`. 27 | 28 | output: 29 | 30 | ```text 31 | Hello, one 32 | Hello, one 33 | Hello, two 34 | ``` 35 | 36 | ```bash 37 | nc localhost 12345 38 | ``` 39 | 40 | Type a `Hello, two` message and press `enter`. 41 | 42 | output: 43 | 44 | ```text 45 | Hello, one 46 | Hello, two 47 | Hello, two 48 | ``` 49 | 50 | ## Teardown 51 | 52 | To remove any resources created by the Docker Compose stack, use: 53 | 54 | ```bash 55 | docker compose down 56 | ``` 57 | -------------------------------------------------------------------------------- /tcp.reflect/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-tcp-reflect} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 12345:12345 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/12345"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | networks: 19 | default: 20 | driver: bridge 21 | -------------------------------------------------------------------------------- /tcp.reflect/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 12345 10 | exit: north_fan_server 11 | north_fan_server: 12 | type: fan 13 | kind: server 14 | exit: north_echo_server 15 | north_echo_server: 16 | type: echo 17 | kind: server 18 | telemetry: 19 | exporters: 20 | stdout_logs_exporter: 21 | type: stdout 22 | -------------------------------------------------------------------------------- /tls.echo/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="23456" 8 | INPUT="Hello, Zilla!" 9 | EXPECTED="Hello, Zilla!" 
10 | echo \# Testing tls.echo/ 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | echo 15 | 16 | # WHEN 17 | OUTPUT=$(echo "$INPUT"; sleep 2 | openssl s_client -connect localhost:$PORT -CAfile test-ca.crt -quiet -alpn echo -no_ign_eof) 18 | RESULT=$? 19 | echo RESULT="$RESULT" 20 | 21 | # THEN 22 | echo OUTPUT="$OUTPUT" 23 | echo EXPECTED="$EXPECTED" 24 | echo 25 | 26 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 27 | echo ✅ 28 | else 29 | echo ❌ 30 | EXIT=1 31 | fi 32 | 33 | exit $EXIT 34 | -------------------------------------------------------------------------------- /tls.echo/.gitignore: -------------------------------------------------------------------------------- 1 | .vault/ 2 | -------------------------------------------------------------------------------- /tls.echo/README.md: -------------------------------------------------------------------------------- 1 | # tls.echo 2 | 3 | Listens on tls port `23456` and will echo back whatever is sent to the server. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - openssl 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | ```bash 21 | openssl s_client -connect localhost:23456 -CAfile test-ca.crt -quiet -alpn echo 22 | ``` 23 | 24 | output: 25 | 26 | ```text 27 | depth=1 C = US, ST = California, L = Palo Alto, O = Aklivity, OU = Development, CN = Test CA 28 | verify return:1 29 | depth=0 C = US, ST = California, L = Palo Alto, O = Aklivity, OU = Development, CN = localhost 30 | verify return:1 31 | ``` 32 | 33 | Type a `Hello, world` message and press `enter`. 34 | 35 | output: 36 | 37 | ```text 38 | Hello, world 39 | Hello, world 40 | ``` 41 | 42 | ## Teardown 43 | 44 | To remove any resources created by the Docker Compose stack, use: 45 | 46 | ```bash 47 | docker compose down 48 | ``` 49 | -------------------------------------------------------------------------------- /tls.echo/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-tls-echo} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 23456:23456 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/23456"] 14 | environment: 15 | KEYSTORE_PASSWORD: generated 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | networks: 21 | default: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- /tls.echo/etc/tls/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/tls.echo/etc/tls/localhost.p12 -------------------------------------------------------------------------------- /tls.echo/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | vaults: 4 | my_servers: 5 | type: filesystem 6 | options: 7 | keys: 8 | store: tls/localhost.p12 9 | type: pkcs12 10 | password: ${{env.KEYSTORE_PASSWORD}} 11 | bindings: 12 | north_tcp_server: 13 | type: tcp 14 | kind: server 15 | options: 16 | host: 0.0.0.0 17 | port: 23456 18 | exit: north_tls_server 19 
| north_tls_server: 20 | type: tls 21 | kind: server 22 | vault: my_servers 23 | options: 24 | keys: 25 | - localhost 26 | sni: 27 | - localhost 28 | alpn: 29 | - echo 30 | routes: 31 | - when: 32 | - alpn: echo 33 | exit: north_echo_server 34 | north_echo_server: 35 | type: echo 36 | kind: server 37 | telemetry: 38 | exporters: 39 | stdout_logs_exporter: 40 | type: stdout 41 | -------------------------------------------------------------------------------- /tls.echo/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/tls.echo/localhost.p12 -------------------------------------------------------------------------------- /tls.echo/regen: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | mkdir .vault 3 | cd .vault 4 | openssl genrsa -aes256 -out test-ca.key 4096 5 | 6 | openssl req -new -key test-ca.key -x509 -out test-ca.crt -days 3650 -subj "/C=US/ST=California/L=Palo Alto/O=Aklivity/OU=Development/CN=Test CA" 7 | 8 | openssl req -new -nodes -newkey rsa:4096 -keyout localhost.key -out localhost.req -batch -subj "/C=US/ST=California/L=Palo Alto/O=Aklivity/OU=Development/CN=localhost" 9 | 10 | openssl x509 -req -in localhost.req -CA test-ca.crt -CAkey test-ca.key -CAcreateserial -out localhost.crt -days 3650 -sha256 11 | 12 | cat localhost.key > localhost.chain.pem 13 | cat localhost.crt >> localhost.chain.pem 14 | cat test-ca.crt >> localhost.chain.pem 15 | 16 | echo "Export password: generated" 17 | openssl pkcs12 -export -in localhost.chain.pem -name localhost -out localhost.p12 18 | 19 | cp test-ca.crt .. 20 | cp localhost.p12 ../chart/files 21 | -------------------------------------------------------------------------------- /tls.echo/test-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFXjCCA0YCCQCuorYrG5wG+DANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV 3 | UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREwDwYD 4 | VQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMMB1Rl 5 | c3QgQ0EwHhcNMjExMjIxMjMwNDExWhcNMzExMjE5MjMwNDExWjBxMQswCQYDVQQG 6 | EwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREw 7 | DwYDVQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMM 8 | B1Rlc3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGPVgVO/zd 9 | ebwGWujKymJmztWZ5LIaZC+zY1SwKUBUA3+vrtO79ndi6WePiV0a2e7wov/ajFLp 10 | mor2RfGSMD8Yb9e98QSqnfy9Q5+ABmxFulgSJNwDjnxugZuk/6MILKMg7AsgqaxK 11 | wROSSLcom8b+gkbwXgHm57RKiitXlRM9ujdKibeHwfu7JTk6A7LwRbCVurTRqckw 12 | Q0/mA4mNuZ2AMGW+YL36TwTLfTAa4AVHEbI3U5+TyY3DoV7OoHI4Ec1/7B0CGzqK 13 | smKM3dKmXpRIc5NBZt+eKqphAhp0CD1eAnutWtepahjWyY1fAYk9hZ+ayU52dAMf 14 | +TbkPdMn5jfHhqs95VdfQjsKZPyNTYjhjHN9tAph1wKUG4XRATAvxhA2gpYgN9de 15 | 9ztWPboVzGosauQxPrXklO8CF7hsft0RlCCP9ojVLUkZ42vI/M1S3lD8pCDtPe46 16 | 2zQ9S3F1R7goF3AqWm4EQqu237+zL45pCbbWyyHeXHeDrv3DNXHcWXoFicNmCBl6 17 | nPPsVn9qgdhmJf5QcUKLkJEEtk94Uedv5qEqiJQYSPAIZHKnv4L5Li69kghTbUv/ 18 | Xquz2JdY5daj5eRurgZVjutkmMIaR4rJdhifBonlcKxoeSZoVbnoGzS5KcF9saz8 19 | 9qYU9LtF98CUMY7U4RPlVbA8D4YwDICgcwIDAQABMA0GCSqGSIb3DQEBCwUAA4IC 20 | AQDEzoEbCsHpae5M1I+vezg7w5NAItMU4WU9IccTylSB/gfIT+hWwIv9KiqTWjxw 21 | Y5Aj6XJ1mATHAMSQnNZCnP2Hw39/Nc3HcKmek2na2zK/TBSEFXudJmox8SK32r26 22 | nLstNlcYf7ixqJ5T7SOE2GJOcEUWpvTSbvQD0NvG81BVnSyUfX3FgkQLwwlyBoSE 23 | 7FwFz+ybrbisUHHqzPVnSblEDbKv6T9ai3FjbBegzPVSd9RmtB/DzxhdSk+kL1oD 24 | 
VSEPweSHEqamEnq2RIgLb7rYhmfohl0fGF5W6I3LvLqqe0KLRRID9V/jwBUGyICG 25 | W3jGu+68jOIUqXA4+gfOwXNktd4F7So48ySbghgrY0Umr4KSs6CTHhvSZ4ZG8QO/ 26 | ZyC+DjXsU3mihIBP/Q43YU7dYxFSdlCw79YnXvdWu7K7lZ1bIcbdH+RShcbvPcwg 27 | iM2qAvCgZBA8xHMDQeev8QdQjxtN+uBfee0mkvbzPbIh/0prywPHjAie/bXVBPVt 28 | VK6Gej2egPCIA5ThvGpmXh8kPd5Aqy1J++cmrzfYfPPsbmPGTLI0HFMhUuzIhFbd 29 | TzAV/Qj83r722s6f0v3KEEhfi3EZu3bRSGIyxVtebtOLGvEb2PjJrktyVJgivVFX 30 | uHHpz76QFOcLy1F962Hfj51NnIROOySyl12JkhPRTlMoiQ== 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /tls.reflect/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="23456" 8 | INPUT="Hello!" 9 | INPUT1="Hello, Zilla!" 10 | INPUT2="Bye, Zilla!" 11 | EXPECTED="Hello, Zilla! 12 | Bye, Zilla!" 13 | 14 | echo \# Testing tls.reflect/ 15 | echo PORT="$PORT" 16 | echo INPUT="$INPUT" 17 | echo EXPECTED="$EXPECTED" 18 | echo 19 | 20 | # WHEN 21 | 22 | for i in $(seq 1 5); do 23 | echo "$INPUT" | openssl s_client -connect localhost:$PORT -CAfile test-ca.crt -quiet -alpn echo -no_ign_eof 24 | 25 | if [ $? -eq 0 ]; then 26 | echo "✅ Zilla is reachable." 27 | break 28 | fi 29 | 30 | sleep 2 31 | done 32 | 33 | OUTPUT=$( 34 | echo "$INPUT1"; sleep 2 | openssl s_client -connect localhost:$PORT -CAfile test-ca.crt -quiet -alpn echo -no_ign_eof 35 | echo "$INPUT2"; sleep 2 | openssl s_client -connect localhost:$PORT -CAfile test-ca.crt -quiet -alpn echo -no_ign_eof) 36 | RESULT=$? 37 | echo RESULT="$RESULT" 38 | 39 | # THEN 40 | echo OUTPUT="$OUTPUT" 41 | echo EXPECTED="$EXPECTED" 42 | echo 43 | 44 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 45 | echo ✅ 46 | else 47 | echo ❌ 48 | EXIT=1 49 | fi 50 | 51 | exit $EXIT 52 | -------------------------------------------------------------------------------- /tls.reflect/README.md: -------------------------------------------------------------------------------- 1 | # tls.reflect 2 | 3 | Listens on tls port `23456` and will echo back whatever is sent to the server, broadcasting to all clients. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - openssl 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | Connect each client first, then send `Hello, one` from first client, then send `Hello, two` from second client. 21 | 22 | ```bash 23 | openssl s_client -connect localhost:23456 -CAfile test-ca.crt -quiet -alpn echo 24 | ``` 25 | 26 | output: 27 | 28 | ```text 29 | depth=1 C = US, ST = California, L = Palo Alto, O = Aklivity, OU = Development, CN = Test CA 30 | verify return:1 31 | depth=0 C = US, ST = California, L = Palo Alto, O = Aklivity, OU = Development, CN = localhost 32 | verify return:1 33 | ``` 34 | 35 | Type a `Hello, one` message and press `enter`. 36 | 37 | output: 38 | 39 | ```text 40 | Hello, one 41 | Hello, one 42 | Hello, two 43 | ``` 44 | 45 | ```bash 46 | openssl s_client -connect localhost:23456 -CAfile test-ca.crt -quiet -alpn echo 47 | ``` 48 | 49 | output: 50 | 51 | ```text 52 | depth=1 C = US, ST = California, L = Palo Alto, O = Aklivity, OU = Development, CN = Test CA 53 | verify return:1 54 | depth=0 C = US, ST = California, L = Palo Alto, O = Aklivity, OU = Development, CN = localhost 55 | verify return:1 56 | ``` 57 | 58 | Type a `Hello, two` message and press `enter`. 
59 | 60 | output: 61 | 62 | ```text 63 | Hello, one 64 | Hello, two 65 | Hello, two 66 | ``` 67 | 68 | ## Teardown 69 | 70 | To remove any resources created by the Docker Compose stack, use: 71 | 72 | ```bash 73 | docker compose down 74 | ``` 75 | -------------------------------------------------------------------------------- /tls.reflect/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-tls-reflect} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 23456:23456 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/23456"] 14 | environment: 15 | KEYSTORE_PASSWORD: generated 16 | volumes: 17 | - ./etc:/etc/zilla 18 | command: start -v -e 19 | 20 | networks: 21 | default: 22 | driver: bridge 23 | -------------------------------------------------------------------------------- /tls.reflect/etc/tls/localhost.p12: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aklivity/zilla-examples/7a5307d97d4a9e1fd731ac4fc3230f0b9e53e36e/tls.reflect/etc/tls/localhost.p12 -------------------------------------------------------------------------------- /tls.reflect/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | vaults: 4 | my_servers: 5 | type: filesystem 6 | options: 7 | keys: 8 | store: tls/localhost.p12 9 | type: pkcs12 10 | password: ${{env.KEYSTORE_PASSWORD}} 11 | bindings: 12 | north_tcp_server: 13 | type: tcp 14 | kind: server 15 | options: 16 | host: 0.0.0.0 17 | port: 23456 18 | exit: north_tls_server 19 | north_tls_server: 20 | type: tls 21 | kind: server 22 | vault: my_servers 23 | options: 24 | keys: 25 | - localhost 26 | sni: 27 | - localhost 28 | alpn: 29 | - echo 30 | routes: 31 | - when: 32 | - alpn: echo 33 | exit: north_fan_server 34 | north_fan_server: 35 | type: fan 36 | kind: server 37 | exit: north_echo_server 38 | north_echo_server: 39 | type: echo 40 | kind: server 41 | telemetry: 42 | exporters: 43 | stdout_logs_exporter: 44 | type: stdout 45 | -------------------------------------------------------------------------------- /tls.reflect/test-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFXjCCA0YCCQCuorYrG5wG+DANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV 3 | UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREwDwYD 4 | VQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMMB1Rl 5 | c3QgQ0EwHhcNMjExMjIxMjMwNDExWhcNMzExMjE5MjMwNDExWjBxMQswCQYDVQQG 6 | EwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMREw 7 | DwYDVQQKDAhBa2xpdml0eTEUMBIGA1UECwwLRGV2ZWxvcG1lbnQxEDAOBgNVBAMM 8 | B1Rlc3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDGPVgVO/zd 9 | ebwGWujKymJmztWZ5LIaZC+zY1SwKUBUA3+vrtO79ndi6WePiV0a2e7wov/ajFLp 10 | mor2RfGSMD8Yb9e98QSqnfy9Q5+ABmxFulgSJNwDjnxugZuk/6MILKMg7AsgqaxK 11 | wROSSLcom8b+gkbwXgHm57RKiitXlRM9ujdKibeHwfu7JTk6A7LwRbCVurTRqckw 12 | Q0/mA4mNuZ2AMGW+YL36TwTLfTAa4AVHEbI3U5+TyY3DoV7OoHI4Ec1/7B0CGzqK 13 | smKM3dKmXpRIc5NBZt+eKqphAhp0CD1eAnutWtepahjWyY1fAYk9hZ+ayU52dAMf 14 | +TbkPdMn5jfHhqs95VdfQjsKZPyNTYjhjHN9tAph1wKUG4XRATAvxhA2gpYgN9de 15 | 9ztWPboVzGosauQxPrXklO8CF7hsft0RlCCP9ojVLUkZ42vI/M1S3lD8pCDtPe46 16 | 
2zQ9S3F1R7goF3AqWm4EQqu237+zL45pCbbWyyHeXHeDrv3DNXHcWXoFicNmCBl6 17 | nPPsVn9qgdhmJf5QcUKLkJEEtk94Uedv5qEqiJQYSPAIZHKnv4L5Li69kghTbUv/ 18 | Xquz2JdY5daj5eRurgZVjutkmMIaR4rJdhifBonlcKxoeSZoVbnoGzS5KcF9saz8 19 | 9qYU9LtF98CUMY7U4RPlVbA8D4YwDICgcwIDAQABMA0GCSqGSIb3DQEBCwUAA4IC 20 | AQDEzoEbCsHpae5M1I+vezg7w5NAItMU4WU9IccTylSB/gfIT+hWwIv9KiqTWjxw 21 | Y5Aj6XJ1mATHAMSQnNZCnP2Hw39/Nc3HcKmek2na2zK/TBSEFXudJmox8SK32r26 22 | nLstNlcYf7ixqJ5T7SOE2GJOcEUWpvTSbvQD0NvG81BVnSyUfX3FgkQLwwlyBoSE 23 | 7FwFz+ybrbisUHHqzPVnSblEDbKv6T9ai3FjbBegzPVSd9RmtB/DzxhdSk+kL1oD 24 | VSEPweSHEqamEnq2RIgLb7rYhmfohl0fGF5W6I3LvLqqe0KLRRID9V/jwBUGyICG 25 | W3jGu+68jOIUqXA4+gfOwXNktd4F7So48ySbghgrY0Umr4KSs6CTHhvSZ4ZG8QO/ 26 | ZyC+DjXsU3mihIBP/Q43YU7dYxFSdlCw79YnXvdWu7K7lZ1bIcbdH+RShcbvPcwg 27 | iM2qAvCgZBA8xHMDQeev8QdQjxtN+uBfee0mkvbzPbIh/0prywPHjAie/bXVBPVt 28 | VK6Gej2egPCIA5ThvGpmXh8kPd5Aqy1J++cmrzfYfPPsbmPGTLI0HFMhUuzIhFbd 29 | TzAV/Qj83r722s6f0v3KEEhfi3EZu3bRSGIyxVtebtOLGvEb2PjJrktyVJgivVFX 30 | uHHpz76QFOcLy1F962Hfj51NnIROOySyl12JkhPRTlMoiQ== 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /ws.echo/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT="Hello, world" 9 | EXPECTED="Hello, world" 10 | echo \# Testing ws.echo 11 | echo PORT="$PORT" 12 | echo INPUT="$INPUT" 13 | echo EXPECTED="$EXPECTED" 14 | 15 | # WHEN 16 | OUTPUT=$(echo "$INPUT" | docker compose -p zilla-ws-echo exec -T websocat websocat --protocol echo ws://zilla.examples.dev:7114/) 17 | RESULT=$? 18 | echo RESULT="$RESULT" 19 | # THEN 20 | echo OUTPUT="$OUTPUT" 21 | echo EXPECTED="$EXPECTED" 22 | echo 23 | if [ "$RESULT" -eq 0 ] && [ "$OUTPUT" = "$EXPECTED" ]; then 24 | echo ✅ 25 | else 26 | echo ❌ 27 | EXIT=1 28 | fi 29 | 30 | exit $EXIT 31 | -------------------------------------------------------------------------------- /ws.echo/README.md: -------------------------------------------------------------------------------- 1 | # ws.echo 2 | 3 | Listens on ws port `7114` and will echo back whatever is sent to the server. 4 | Listens on wss port `7114` and will echo back whatever is sent to the server. 5 | 6 | ## Requirements 7 | 8 | - docker compose 9 | - wscat 10 | 11 | ## Setup 12 | 13 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 14 | 15 | ```bash 16 | docker compose up -d 17 | ``` 18 | 19 | ### Install wscat 20 | 21 | ```bash 22 | npm install wscat -g 23 | ``` 24 | 25 | ### Verify behavior 26 | 27 | ```bash 28 | wscat -c ws://localhost:7114/ -s echo 29 | ``` 30 | 31 | Type a `Hello, world` message and press `enter`. 32 | 33 | output: 34 | 35 | ```text 36 | Connected (press CTRL+C to quit) 37 | > Hello, world 38 | < Hello, world 39 | ``` 40 | 41 | ```bash 42 | wscat -c wss://localhost:7114/ --ca test-ca.crt -s echo 43 | ``` 44 | 45 | Type a `Hello, world` message and press `enter`. 
46 | 47 | output: 48 | 49 | ```text 50 | Connected (press CTRL+C to quit) 51 | > Hello, world 52 | < Hello, world 53 | ``` 54 | 55 | ## Teardown 56 | 57 | To remove any resources created by the Docker Compose stack, use: 58 | 59 | ```bash 60 | docker compose down 61 | ``` 62 | -------------------------------------------------------------------------------- /ws.echo/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-ws-echo} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | websocat: 19 | image: solsson/websocat 20 | entrypoint: ["/bin/sh", "-c"] 21 | command: ["while true; do sleep 3600; done"] 22 | 23 | networks: 24 | default: 25 | driver: bridge 26 | -------------------------------------------------------------------------------- /ws.echo/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | upgrade: websocket 22 | exit: north_ws_server 23 | north_ws_server: 24 | type: ws 25 | kind: server 26 | routes: 27 | - when: 28 | - protocol: echo 29 | exit: north_echo_server 30 | north_echo_server: 31 | type: echo 32 | kind: server 33 | telemetry: 34 | exporters: 35 | stdout_logs_exporter: 36 | type: stdout 37 | -------------------------------------------------------------------------------- /ws.reflect/.github/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | EXIT=0 5 | 6 | # GIVEN 7 | PORT="7114" 8 | INPUT1="Hello from client 1" 9 | INPUT2="Hello from client 2" 10 | 11 | echo \# Testing ws.reflect 12 | echo PORT="$PORT" 13 | echo INPUT1="$INPUT1" 14 | echo INPUT2="$INPUT2" 15 | 16 | # WHEN 17 | 18 | for i in $(seq 1 5); do 19 | echo "$INPUT1" | timeout 3 docker compose -p zilla-ws-reflect exec -T websocat websocat --protocol echo ws://zilla.examples.dev:7114/ 20 | 21 | if [ $? -eq 0 ]; then 22 | echo "✅ Zilla is reachable." 23 | break 24 | fi 25 | 26 | sleep 2 27 | done 28 | 29 | { 30 | (echo "$INPUT1"; sleep 2) | timeout 3 docker compose -p zilla-ws-reflect exec -T websocat websocat --protocol echo ws://zilla.examples.dev:$PORT/ & 31 | PID1=$! 32 | (echo "$INPUT2"; sleep 2) | timeout 3 docker compose -p zilla-ws-reflect exec -T websocat websocat --protocol echo ws://zilla.examples.dev:$PORT/ & 33 | PID2=$! 34 | 35 | wait $PID1 $PID2 36 | } > output.out 2>&1 37 | 38 | RESULT1=$? 39 | RESULT2=$? 
40 | OUTPUT=$(cat output.out) 41 | 42 | # THEN 43 | COUNT1=$(echo "$OUTPUT" | grep -Fx "$INPUT1" | wc -l) 44 | COUNT2=$(echo "$OUTPUT" | grep -Fx "$INPUT2" | wc -l) 45 | 46 | if [ "$RESULT1" -eq 0 ] && [ "$RESULT2" -eq 0 ] && [ "$COUNT1" -eq 2 ] && [ "$COUNT2" -eq 2 ]; then 47 | echo ✅ 48 | else 49 | echo ❌ 50 | EXIT=1 51 | fi 52 | 53 | exit $EXIT 54 | -------------------------------------------------------------------------------- /ws.reflect/README.md: -------------------------------------------------------------------------------- 1 | # ws.reflect 2 | 3 | Listens on ws port `7114` and will echo back whatever is sent to the server, broadcasting to all clients. 4 | 5 | ## Requirements 6 | 7 | - docker compose 8 | - wscat 9 | 10 | ## Setup 11 | 12 | To `start` the Docker Compose stack defined in the [compose.yaml](compose.yaml) file, use: 13 | 14 | ```bash 15 | docker compose up -d 16 | ``` 17 | 18 | ### Verify behavior 19 | 20 | Connect each client first, then send `Hello, one` from first client, then send `Hello, two` from second client. 21 | 22 | ```bash 23 | wscat -c ws://localhost:7114/ -s echo 24 | ``` 25 | 26 | Type a `Hello, one` message and press `enter`. 27 | 28 | output: 29 | 30 | ```text 31 | Connected (press CTRL+C to quit) 32 | > Hello, one 33 | < Hello, one 34 | < Hello, two 35 | ``` 36 | 37 | ```bash 38 | wscat -c wss://localhost:7114/ --ca test-ca.crt -s echo 39 | ``` 40 | 41 | Type a `Hello, two` message and press `enter`. 42 | 43 | output: 44 | 45 | ```text 46 | Connected (press CTRL+C to quit) 47 | < Hello, one 48 | > Hello, two 49 | < Hello, two 50 | ``` 51 | 52 | ## Teardown 53 | 54 | To remove any resources created by the Docker Compose stack, use: 55 | 56 | ```bash 57 | docker compose down 58 | ``` 59 | -------------------------------------------------------------------------------- /ws.reflect/compose.yaml: -------------------------------------------------------------------------------- 1 | name: ${NAMESPACE:-zilla-ws-reflect} 2 | services: 3 | zilla: 4 | image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} 5 | restart: unless-stopped 6 | hostname: zilla.examples.dev 7 | ports: 8 | - 7114:7114 9 | healthcheck: 10 | interval: 5s 11 | timeout: 3s 12 | retries: 5 13 | test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] 14 | volumes: 15 | - ./etc:/etc/zilla 16 | command: start -v -e 17 | 18 | websocat: 19 | image: solsson/websocat 20 | entrypoint: ["/bin/sh", "-c"] 21 | command: ["while true; do sleep 3600; done"] 22 | depends_on: 23 | zilla: 24 | condition: service_healthy 25 | restart: true 26 | 27 | networks: 28 | default: 29 | driver: bridge 30 | -------------------------------------------------------------------------------- /ws.reflect/etc/zilla.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: example 3 | bindings: 4 | north_tcp_server: 5 | type: tcp 6 | kind: server 7 | options: 8 | host: 0.0.0.0 9 | port: 10 | - 7114 11 | routes: 12 | - when: 13 | - port: 7114 14 | exit: north_http_server 15 | north_http_server: 16 | type: http 17 | kind: server 18 | routes: 19 | - when: 20 | - headers: 21 | upgrade: websocket 22 | exit: north_ws_server 23 | north_ws_server: 24 | type: ws 25 | kind: server 26 | routes: 27 | - when: 28 | - protocol: echo 29 | exit: north_fan_server 30 | north_fan_server: 31 | type: fan 32 | kind: server 33 | exit: north_echo_server 34 | north_echo_server: 35 | type: echo 36 | kind: server 37 | telemetry: 38 | exporters: 39 | stdout_logs_exporter: 40 | type: stdout 41 | 
--------------------------------------------------------------------------------