├── sql-aggregator ├── VERSION ├── .mvn │ └── wrapper │ │ ├── maven-wrapper.jar │ │ └── maven-wrapper.properties ├── rules.mk ├── Dockerfile ├── .gitignore ├── src │ └── main │ │ └── resources │ │ └── logback.xml └── README.md ├── udf-utilities ├── streaming-runtime-udf-aggregator-js │ ├── .gitignore │ ├── Dockerfile │ ├── proto │ │ └── MessageService.proto │ └── package.json ├── streaming-runtime-udf-aggregator-java │ ├── src │ │ └── main │ │ │ ├── resources │ │ │ └── application.properties │ │ │ └── java │ │ │ └── com │ │ │ └── tanzu │ │ │ └── streaming │ │ │ └── runtime │ │ │ └── udf │ │ │ └── aggregator │ │ │ ├── IdentityReleaser.java │ │ │ ├── PayloadConverter.java │ │ │ ├── Aggregate.java │ │ │ └── Releaser.java │ ├── .mvn │ │ └── wrapper │ │ │ ├── maven-wrapper.jar │ │ │ └── maven-wrapper.properties │ ├── README.md │ ├── .gitignore │ └── pom.xml └── MessageService.proto ├── docs ├── index.md ├── why │ ├── cp-vs-dp.gif │ └── ooperator-hub.png ├── assets │ └── images │ │ ├── waves.png │ │ ├── working.png │ │ ├── slack-icon.png │ │ └── github.svg ├── samples │ ├── kowl-schemas.png │ ├── kowl-topics.png │ ├── rabbitmq-ui.png │ ├── kowl-topics-details.png │ ├── iot-monitoring │ │ └── iot-logo.png │ ├── clickstream │ │ └── clickstream-logo.png │ ├── top-k-songs │ │ └── top-k-songs-logo.png │ ├── anomaly-detection │ │ └── anomaly-detection-logo.png │ └── overview.md ├── architecture │ ├── processors │ │ ├── processors-basic-diagram.png │ │ ├── overview.md │ │ └── scs │ │ │ └── overview.md │ └── service-binding │ │ └── streaming-runtime-service-binding.png ├── stylesheets │ └── extra.css ├── Dockerfile ├── _index.md_ ├── streaming-runtime-build.md └── sr-technical-stack.md ├── streaming-runtime-samples ├── tutorials │ ├── 6-user-score-aggregation-js │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── package.json │ │ ├── README.md │ │ └── aggregate.js │ ├── 6.1-team-score-aggregation-js │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── package.json │ │ ├── README.md │ │ └── 
aggregate.js │ ├── 5.2-partition-by-field-with-stateful-replication(rabbitmq).md │ ├── 1-message-retransmission.yaml │ ├── 4-stateless-replication.yaml │ ├── 6.1-partition-by-field-stateful-replication-time-window-aggregation.md │ ├── 3-inline-transformation.md │ ├── 5.1-partition-by-field(header-keys)-with-stateful-replication.md │ ├── 3-inline-transformation.yaml │ ├── 3.1-polyglot-udf-transformation.yaml │ ├── 2.1-multibiner-bridge-production-mode.md │ ├── 3.2-scs-transformation.md │ ├── 8-secrets-management-with-service-binding.md │ ├── 6-time-window-aggregation.md │ ├── 3.1-polyglot-udf-transformation.md │ ├── 1-message-retransmission.md │ ├── 2-multibiner-bridge.yaml │ ├── 7-streaming-join-with-flink-sql.md │ ├── 3.2-scs-transformation.yaml │ ├── 2-multibiner-bridge.md │ ├── 5.1-partition-by-field(header-keys)-with-stateful-replication.yaml │ ├── 4-stateless-replication.md │ ├── 5-partition-by-field-with-stateful-replication.md │ ├── README.md │ ├── 5-partition-by-field-with-stateful-replication.yaml │ ├── 5.2-partition-by-field-with-stateful-replication(rabbitmq).yaml │ ├── 2.1-multibiner-bridge-production-mode.yaml │ ├── 6-time-window-aggregation.yaml │ └── 8-secrets-management-with-service-binding.yaml ├── online-gaming-statistics │ ├── gaming-team-score │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── package.json │ │ ├── README.md │ │ └── aggregate.js │ └── gaming-user-score │ │ ├── src │ │ ├── main │ │ │ ├── resources │ │ │ │ └── application.properties │ │ │ └── java │ │ │ │ └── com │ │ │ │ └── tanzu │ │ │ │ └── streaming │ │ │ │ └── runtime │ │ │ │ └── udf │ │ │ │ └── gaming │ │ │ │ └── user │ │ │ │ └── score │ │ │ │ └── GamingUserScoreApplicationProperties.java │ │ └── test │ │ │ └── java │ │ │ └── com │ │ │ └── tanzu │ │ │ └── streaming │ │ │ └── runtime │ │ │ └── udf │ │ │ └── gaming │ │ │ └── user │ │ │ └── score │ │ │ └── GamingUserScoreApplicationTests.java │ │ ├── .mvn │ │ └── wrapper │ │ │ ├── maven-wrapper.jar │ │ │ └── maven-wrapper.properties │ 
│ ├── README.md │ │ └── .gitignore ├── udf-samples │ ├── udf-uppercase-go │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── protos │ │ │ ├── go.mod │ │ │ └── MessageService.proto │ │ ├── go.mod │ │ ├── udf-uppercase-go.go │ │ └── README.md │ ├── udf-uppercase-python │ │ ├── requirements.txt │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── protos │ │ │ └── MessageService.proto │ │ ├── message_service_client.py │ │ ├── message_service_server.py │ │ └── README.md │ ├── streaming-runtime-python-udf-pipeline.jpg │ └── udf-uppercase-java │ │ ├── src │ │ ├── main │ │ │ ├── resources │ │ │ │ └── application.properties │ │ │ └── java │ │ │ │ └── net │ │ │ │ └── tzolov │ │ │ │ └── poc │ │ │ │ └── uppercasegrpc │ │ │ │ └── UppercaseGrpcApplication.java │ │ └── test │ │ │ └── java │ │ │ └── net │ │ │ └── tzolov │ │ │ └── poc │ │ │ └── uppercasegrpc │ │ │ └── UppercaseGrpcApplicationTests.java │ │ ├── .mvn │ │ └── wrapper │ │ │ ├── maven-wrapper.jar │ │ │ └── maven-wrapper.properties │ │ ├── README.md │ │ └── .gitignore ├── anomaly-detection │ ├── light │ │ ├── fraud-detection-udf-js │ │ │ ├── .gitignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ ├── README.md │ │ │ └── fraud-detector.js │ │ └── streaming-pipeline-light.yaml │ └── README.md ├── clickstream │ └── README.md ├── spring-cloud-stream │ ├── README.md │ ├── streaming-pipeline-ticktock-partitioned-better.yaml │ └── streaming-pipeline-ticktock.yaml ├── iot-monitoring │ └── README.md ├── README.md ├── tests │ ├── test-kafka-stream.yaml │ ├── test-rabbitmq-stream.yaml │ ├── test-rabbitmq-cluster-stream.yaml │ ├── test-kafka-cluster-stream.yaml │ ├── test-rabbitmq-op-clusterstream.yaml │ ├── test-rabbitmq-op-clusterstream2.yaml │ ├── test-processor.yaml │ ├── kafka-ui.yaml │ ├── kafka-kowl-ui.yaml │ ├── kafka-akhg-ui.yaml │ ├── test-all.yaml │ └── streaming-pipeline-auto-provisioned-streams.yaml ├── top-k-songs │ └── README.md └── service-binding │ └── README.md ├── .gitignore ├── streaming-runtime-operator ├── manifests │ 
├── streaming-runtime-namespace.yaml │ ├── streaming-runtime-account.yaml │ ├── streaming-runtime-namespaced-role-binding.yaml │ ├── streaming-runtime-cluster-role-binding.yaml │ ├── kustomization.yaml │ ├── streaming-runtime-namespaced-role.yaml │ ├── cluster-stream-deployment.yaml │ └── streaming-runtime-cluster-role.yaml ├── .mvn │ └── wrapper │ │ ├── maven-wrapper.jar │ │ └── maven-wrapper.properties ├── streaming-runtime │ ├── src │ │ ├── main │ │ │ ├── resources │ │ │ │ ├── META-INF │ │ │ │ │ └── spring.factories │ │ │ │ ├── application.yaml │ │ │ │ └── manifests │ │ │ │ │ ├── processor │ │ │ │ │ ├── statefulset-service-template.yaml │ │ │ │ │ ├── generic-streaming-runtime-processor-deployment.yaml │ │ │ │ │ ├── srp-processor-container-template.yaml │ │ │ │ │ ├── statefulset-template.yaml │ │ │ │ │ └── sql-aggregation-container-template.yaml │ │ │ │ │ └── protocol │ │ │ │ │ ├── kafka │ │ │ │ │ ├── kafka-kowl-ui-svc.yaml │ │ │ │ │ ├── kafka-schema-registry-svc.yaml │ │ │ │ │ ├── kafka-zk-svc.yaml │ │ │ │ │ ├── kafka-svc.yaml │ │ │ │ │ ├── kafka-zk-deployment.yaml │ │ │ │ │ ├── kafka-schema-registry-deployment.yaml │ │ │ │ │ ├── kafka-kowl-ui-deployment.yaml │ │ │ │ │ └── kafka-deployment.yaml │ │ │ │ │ ├── rabbitmq │ │ │ │ │ ├── rabbitmq-svc.yaml │ │ │ │ │ └── rabbitmq-deployment.yaml │ │ │ │ │ └── rabbitmq-op │ │ │ │ │ ├── rabbitmq-cluster.yaml │ │ │ │ │ └── RABBITMQ-CLUSTERSTREAM-AUTOPROVISION-TEMPLATE.yaml │ │ │ └── java │ │ │ │ └── com │ │ │ │ └── vmware │ │ │ │ └── tanzu │ │ │ │ └── streaming │ │ │ │ └── runtime │ │ │ │ ├── protocol │ │ │ │ ├── REAME.md │ │ │ │ └── ProtocolDeploymentAdapter.java │ │ │ │ ├── StreamingRuntimeApplication.java │ │ │ │ ├── dataschema │ │ │ │ ├── AvroHelper.java │ │ │ │ ├── InlineAvroToAvroConverter.java │ │ │ │ └── DataSchemaAvroConverter.java │ │ │ │ ├── ProcessorStatusException.java │ │ │ │ ├── StreamingRuntimeProperties.java │ │ │ │ ├── config │ │ │ │ └── StreamingRuntimeConfiguration.java │ │ │ │ └── processor │ │ │ │ └── 
ProcessorAdapter.java │ │ └── test │ │ │ └── java │ │ │ └── com │ │ │ └── vmware │ │ │ └── tanzu │ │ │ └── streaming │ │ │ └── runtime │ │ │ ├── StreamingRuntimeApplicationTests.java │ │ │ ├── KubernetesComponentTest.java │ │ │ ├── OncePerClassBeforeAllCallback.java │ │ │ ├── throwaway │ │ │ ├── PlayEventsStream.yaml │ │ │ ├── SongStream.yaml │ │ │ ├── TopKSongsPerGenreStream.yaml │ │ │ └── SongPlaysStream.yaml │ │ │ ├── TestApiClientConfig.java │ │ │ ├── MyComponentTest.java │ │ │ └── TestK8SClient.java │ ├── .mvn │ │ └── wrapper │ │ │ ├── maven-wrapper.jar │ │ │ └── maven-wrapper.properties │ └── .gitignore ├── .gitignore ├── skaffold-dev.yaml ├── skaffold.yaml ├── pom.xml └── scripts │ ├── all.sh │ └── generate-streaming-runtime-crd.sh ├── sr-common ├── .mvn │ └── wrapper │ │ ├── maven-wrapper.jar │ │ └── maven-wrapper.properties ├── src │ ├── main │ │ ├── java │ │ │ └── com │ │ │ │ └── tanzu │ │ │ │ └── streaming │ │ │ │ └── runtime │ │ │ │ └── processor │ │ │ │ └── common │ │ │ │ ├── proto │ │ │ │ ├── messageBuilder.java │ │ │ │ ├── GrpcPayloadCollectionOrBuilder.java │ │ │ │ └── PayloadCollection.java │ │ │ │ └── avro │ │ │ │ └── AvroMessageReader.java │ │ └── resources │ │ │ └── proto │ │ │ └── payload_collection.proto │ └── test │ │ └── java │ │ └── com │ │ └── tanzu │ │ └── streaming │ │ └── runtime │ │ └── processor │ │ └── common │ │ └── AppTest.java ├── README.md └── .gitignore ├── srp-processor ├── .mvn │ └── wrapper │ │ ├── maven-wrapper.jar │ │ └── maven-wrapper.properties ├── README.md ├── .gitignore └── src │ └── main │ └── java │ └── com │ └── tanzu │ └── streaming │ └── runtime │ └── srp │ ├── timestamp │ ├── ProcTimestampAssigner.java │ ├── MessageHeaderTimestampAssigner.java │ ├── RecordTimestampAssigner.java │ └── DefaultEventHeaderOrProcTimestampAssigner.java │ └── processor │ ├── window │ ├── state │ │ ├── State.java │ │ └── StateEntry.java │ ├── IdleWindowHolder.java │ ├── IdleWindowsWatchdog.java │ └── TumblingWindowService.java │ └── 
EventTimeProcessor.java ├── .github └── workflows │ └── docs.yml ├── abbreviations.md ├── NOTICE ├── tools.mk ├── version.mk ├── ARCHIVED.md ├── CLA.md ├── Makefile ├── material └── overrides │ └── main.html └── rules.mk /sql-aggregator/VERSION: -------------------------------------------------------------------------------- 1 | 0.0.2-SNAPSHOT 2 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-js/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | template: overrides/home.html 3 | title: Streaming Runtime 4 | --- 5 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-user-score-aggregation-js/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6.1-team-score-aggregation-js/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-team-score/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- 
/streaming-runtime-samples/udf-samples/udf-uppercase-go/.gitignore: -------------------------------------------------------------------------------- 1 | udf-uppercase-go 2 | .vscode 3 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-python/requirements.txt: -------------------------------------------------------------------------------- 1 | grpcio 2 | grpcio-tools -------------------------------------------------------------------------------- /streaming-runtime-samples/anomaly-detection/light/fraud-detection-udf-js/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-python/.gitignore: -------------------------------------------------------------------------------- 1 | ./venv 2 | __pycache__/ 3 | .idea/ -------------------------------------------------------------------------------- /docs/why/cp-vs-dp.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/why/cp-vs-dp.gif -------------------------------------------------------------------------------- /docs/why/ooperator-hub.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/why/ooperator-hub.png -------------------------------------------------------------------------------- /docs/assets/images/waves.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/assets/images/waves.png -------------------------------------------------------------------------------- /docs/samples/kowl-schemas.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/kowl-schemas.png -------------------------------------------------------------------------------- /docs/samples/kowl-topics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/kowl-topics.png -------------------------------------------------------------------------------- /docs/samples/rabbitmq-ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/rabbitmq-ui.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### IntelliJ IDEA ### 2 | .idea 3 | *.iws 4 | *.iml 5 | *.ipr 6 | 7 | .DS_Store 8 | build/ 9 | 10 | .vscode/ -------------------------------------------------------------------------------- /docs/assets/images/working.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/assets/images/working.png -------------------------------------------------------------------------------- /docs/assets/images/slack-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/assets/images/slack-icon.png -------------------------------------------------------------------------------- /docs/samples/kowl-topics-details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/kowl-topics-details.png 
-------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/streaming-runtime-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: streaming-runtime -------------------------------------------------------------------------------- /docs/samples/iot-monitoring/iot-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/iot-monitoring/iot-logo.png -------------------------------------------------------------------------------- /sr-common/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/sr-common/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/streaming-runtime-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: streaming-runtime -------------------------------------------------------------------------------- /srp-processor/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/srp-processor/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /docs/samples/clickstream/clickstream-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/clickstream/clickstream-logo.png 
-------------------------------------------------------------------------------- /docs/samples/top-k-songs/top-k-songs-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/top-k-songs/top-k-songs-logo.png -------------------------------------------------------------------------------- /sql-aggregator/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/sql-aggregator/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /docs/architecture/processors/processors-basic-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/architecture/processors/processors-basic-diagram.png -------------------------------------------------------------------------------- /docs/samples/anomaly-detection/anomaly-detection-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/samples/anomaly-detection/anomaly-detection-logo.png -------------------------------------------------------------------------------- /streaming-runtime-operator/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/streaming-runtime-operator/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/META-INF/spring.factories: -------------------------------------------------------------------------------- 1 | 
org.springframework.nativex.type.NativeConfiguration=io.kubernetes.nativex.KubernetesApiNativeConfiguration -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/application.yaml: -------------------------------------------------------------------------------- 1 | streaming: 2 | runtime: 3 | operator: 4 | autoProvisionStream: true 5 | autoProvisionClusterStream: true -------------------------------------------------------------------------------- /sql-aggregator/rules.mk: -------------------------------------------------------------------------------- 1 | sql-aggregator.build_dir = $(build_dir)/sql-aggregator 2 | sql-aggregator.out_dir = $(out_dir)/sql-aggregator 3 | sql-aggregator.bucket = gs://streaming-runtimes/sql-aggregator 4 | -------------------------------------------------------------------------------- /sr-common/src/main/java/com/tanzu/streaming/runtime/processor/common/proto/messageBuilder.java: -------------------------------------------------------------------------------- 1 | package com.tanzu.streaming.runtime.processor.common.proto; 2 | 3 | public class messageBuilder { 4 | 5 | } 6 | -------------------------------------------------------------------------------- /streaming-runtime-samples/clickstream/README.md: -------------------------------------------------------------------------------- 1 | # Clickstream Analysis 2 | 3 | Follow the [Clickstream Analysis](https://vmware-tanzu.github.io/streaming-runtimes/samples/clickstream/clickstream/) tutorial. 
-------------------------------------------------------------------------------- /docs/architecture/service-binding/streaming-runtime-service-binding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/docs/architecture/service-binding/streaming-runtime-service-binding.png -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/streaming-runtime-operator/streaming-runtime/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/streaming-runtime-python-udf-pipeline.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/streaming-runtime-samples/udf-samples/streaming-runtime-python-udf-pipeline.jpg -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | spring.cloud.function.definition=uppercase 2 | 3 | spring.cloud.function.grpc.mode=server 4 | spring.cloud.function.grpc.port=55554 -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/udf-utilities/streaming-runtime-udf-aggregator-java/.mvn/wrapper/maven-wrapper.jar 
-------------------------------------------------------------------------------- /streaming-runtime-samples/spring-cloud-stream/README.md: -------------------------------------------------------------------------------- 1 | # Spring Cloud Stream Pipeline 2 | 3 | The [Spring Cloud Stream Pipeline](https://vmware-tanzu.github.io/streaming-runtimes/samples/spring-cloud-stream/tick-tock/) tutorial. 4 | -------------------------------------------------------------------------------- /streaming-runtime-samples/anomaly-detection/README.md: -------------------------------------------------------------------------------- 1 | # Credit Card Anomaly Detection 2 | 3 | Follow the [Credit Card Anomaly Detection](https://vmware-tanzu.github.io/streaming-runtimes/samples/anomaly-detection/anomaly-detection/) tutorial. -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/streaming-runtime-samples/udf-samples/udf-uppercase-java/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | spring.cloud.function.definition=userScore 2 | 3 | spring.cloud.function.grpc.mode=server 4 | spring.cloud.function.grpc.port=55554 5 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/5.2-partition-by-field-with-stateful-replication(rabbitmq).md: -------------------------------------------------------------------------------- 1 | ### 5.2 Partition by Field - RabbitMQ version 2 | 3 | Same as (5.) 
but with RabbitMQ brokers instead for the partitioning section of the pipeline. 4 | 5 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/streaming-runtimes/HEAD/streaming-runtime-samples/online-gaming-statistics/gaming-user-score/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /sql-aggregator/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.3/apache-maven-3.8.3-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /sr-common/src/main/resources/proto/payload_collection.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option java_multiple_files = true; 4 | option java_package = "com.tanzu.streaming.runtime.processor.common.proto"; 5 | 6 | message GrpcPayloadCollection { 7 | repeated bytes payload = 1; 8 | } 9 | -------------------------------------------------------------------------------- /srp-processor/README.md: -------------------------------------------------------------------------------- 1 | # Side-Car Windowed (SRP) Processor 2 | 3 | ./mvnw spring-boot:build-image -Dspring-boot.build-image.imageName=ghcr.io/vmware-tanzu/streaming-runtimes/srp-processor -DskipTests 4 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/srp-processor:latest 5 | 6 | -------------------------------------------------------------------------------- 
/srp-processor/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.5/apache-maven-3.8.5-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar 3 | -------------------------------------------------------------------------------- /streaming-runtime-operator/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.3/apache-maven-3.8.3-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /streaming-runtime-samples/iot-monitoring/README.md: -------------------------------------------------------------------------------- 1 | # Real Time IoT Monitoring 2 | 3 | The [Real Time IoT Log Monitoring](https://vmware-tanzu.github.io/streaming-runtimes/samples/iot-monitoring/iot-monitoring/) tutorial shows how to build a application that monitors network traffic logs. 
4 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.3/apache-maven-3.8.3-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /sr-common/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Generate the protobuf stubs. From within the sr-common folder run: 3 | 4 | ``` 5 | protoc -I=./src/main/resources/proto --java_out=./src/main/java ./src/main/resources/proto/payload_collection.proto 6 | ``` 7 | 8 | Mind that the `GrpcPayloadCollectionSeDe` is not generated class! -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.3/apache-maven-3.8.3-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /docs/stylesheets/extra.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --md-primary-fg-color: #304250; 3 | /*--md-primary-fg-color: #22343c;*/ 4 | --md-primary-fg-color--light: #49afd9; 5 | /*--md-primary-fg-color--light: #adbbc4;*/ 6 | --md-primary-fg-color--dark: #22343c; 7 | /*--md-primary-fg-color--dark: #1b2a32;*/ 8 | } -------------------------------------------------------------------------------- 
/streaming-runtime-samples/anomaly-detection/light/fraud-detection-udf-js/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | FROM node:18.4.0 3 | ENV NODE_ENV=production 4 | WORKDIR /app 5 | COPY ["package.json", "package-lock.json*", "./"] 6 | RUN npm install --production 7 | COPY fraud-detector.js . 8 | CMD [ "node", "fraud-detector.js" ] -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.5/apache-maven-3.8.5-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar 3 | -------------------------------------------------------------------------------- /streaming-runtime-samples/README.md: -------------------------------------------------------------------------------- 1 | # Streaming Runtime Use Cases 2 | 3 | Various samples to demonstrate how to implement various streaming and event-driven use case scenarios with the help of the Streaming Runtime. 4 | Follow the [samples documentation](https://vmware-tanzu.github.io/streaming-runtimes/samples/overview/). 5 | 6 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/1-message-retransmission.yaml: -------------------------------------------------------------------------------- 1 | # 1. 
Message Retransmission 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: re-transmission-processor 6 | spec: 7 | type: SRP 8 | inputs: 9 | - name: data-in 10 | outputs: 11 | - name: data-out 12 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-kafka-stream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: my-kafka-stream 5 | spec: 6 | keys: ["truckclass", "truckid"] 7 | streamMode: ["read"] 8 | protocol: "kafka" 9 | storage: 10 | clusterStream: "my-kafka-cluster-stream" 11 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-python/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9.7-slim 2 | 3 | RUN pip install grpcio 4 | RUN pip install grpcio-tools 5 | 6 | ADD MessageService_pb2.py / 7 | ADD MessageService_pb2_grpc.py / 8 | ADD message_service_server.py / 9 | ENTRYPOINT ["python","/message_service_server.py"] 10 | CMD [] -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.5/apache-maven-3.8.5-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar 3 | -------------------------------------------------------------------------------- /streaming-runtime-samples/top-k-songs/README.md: -------------------------------------------------------------------------------- 1 | # Top-K Songs By Genre 2 | 3 | The 
[Music Chart](https://vmware-tanzu.github.io/streaming-runtimes/samples/top-k-songs/top-k-songs/) tutorial shows how to build a music ranking application that continuously computes the latest Top 3 music charts based on song play events collected in real-time. 4 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-rabbitmq-stream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: my-rabbitmq-stream 5 | spec: 6 | keys: ["truckclass", "truckid"] 7 | streamMode: ["read"] 8 | protocol: "rabbitmq" 9 | storage: 10 | clusterStream: "my-rabbitmq-cluster-stream" 11 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-user-score-aggregation-js/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM node:18.4.0 4 | ENV NODE_ENV=production 5 | 6 | WORKDIR /app 7 | 8 | COPY ["package.json", "package-lock.json*", "./"] 9 | 10 | RUN npm install --production 11 | 12 | COPY aggregate.js . 13 | 14 | CMD [ "node", "aggregate.js" ] -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-team-score/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM node:18.4.0 4 | ENV NODE_ENV=production 5 | 6 | WORKDIR /app 7 | 8 | COPY ["package.json", "package-lock.json*", "./"] 9 | 10 | RUN npm install --production 11 | 12 | COPY aggregate.js . 
13 | 14 | CMD [ "node", "aggregate.js" ] -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6.1-team-score-aggregation-js/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM node:18.4.0 4 | ENV NODE_ENV=production 5 | 6 | WORKDIR /app 7 | 8 | COPY ["package.json", "package-lock.json*", "./"] 9 | 10 | RUN npm install --production 11 | 12 | COPY aggregate.js . 13 | 14 | CMD [ "node", "aggregate.js" ] -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: docs 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | deploy: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - uses: actions/setup-python@v2 12 | with: 13 | python-version: 3.x 14 | - run: pip install mkdocs-material 15 | - run: mkdocs gh-deploy --force 16 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/StreamingRuntimeApplicationTests.java: -------------------------------------------------------------------------------- 1 | package com.vmware.tanzu.streaming.runtime; 2 | 3 | import org.junit.jupiter.api.Test; 4 | 5 | //@SpringBootTest 6 | class StreamingRuntimeApplicationTests { 7 | 8 | @Test 9 | void contextLoads() { 10 | System.out.println("boza"); 11 | } 12 | 13 | } 14 | -------------------------------------------------------------------------------- /sql-aggregator/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM maven:3.8.3-jdk-11 2 | 3 | 4 | WORKDIR /usr/src/sql-aggregator 5 | COPY . 
/usr/src/sql-aggregator 6 | 7 | 8 | RUN mvn -B -DnewVersion=$(cat ./VERSION) -DgenerateBackupPoms=false versions:set 9 | RUN mvn package 10 | RUN mkdir -p /out && cp target/sql-aggregator-$(cat ./VERSION).jar /out/sql-aggregator.jar 11 | 12 | ENTRYPOINT [ "java", "-jar", "/out/sql-aggregator.jar" ] 13 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/4-stateless-replication.yaml: -------------------------------------------------------------------------------- 1 | # 4. Stateless Replication 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: stateless-replication 6 | spec: 7 | type: SRP 8 | # 3 instances 9 | replicas: 3 10 | inputs: 11 | - name: data-in 12 | outputs: 13 | - name: data-out 14 | # attributes: 15 | # forceStatefulSet: "true" -------------------------------------------------------------------------------- /streaming-runtime-samples/service-binding/README.md: -------------------------------------------------------------------------------- 1 | # Service Binding 2 | 3 | For information read the streaming runtime [Service Binding](https://vmware-tanzu.github.io/streaming-runtimes/service-binding/service-binding/) documentation. 4 | 5 | Check the anomaly-detection `streaming-pipeline-sb.yaml` and `streaming-pipeline-sb2.yaml` sample to see how to use Service Binding to pass RabbitMQ credentials to the Processors. 
-------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-user-score-aggregation-js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gaming-team-score", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "aggregate.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "streaming-runtime-udf-aggregator": "^1.0.6" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/src/test/java/net/tzolov/poc/uppercasegrpc/UppercaseGrpcApplicationTests.java: -------------------------------------------------------------------------------- 1 | package net.tzolov.poc.uppercasegrpc; 2 | 3 | import org.junit.jupiter.api.Test; 4 | import org.springframework.boot.test.context.SpringBootTest; 5 | 6 | @SpringBootTest 7 | class UppercaseGrpcApplicationTests { 8 | 9 | @Test 10 | void contextLoads() { 11 | } 12 | 13 | } 14 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/processor/statefulset-service-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: streaming-runtime-processor 5 | labels: 6 | app: streaming-runtime-processor 7 | spec: 8 | ports: 9 | - port: 8080 10 | name: streaming-runtime-processor 11 | clusterIP: None 12 | selector: 13 | app: streaming-runtime-processor 14 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-team-score/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": 
"gaming-team-score", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "aggregate.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "streaming-runtime-udf-aggregator": "^1.0.6" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6.1-team-score-aggregation-js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gaming-team-score", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "aggregate.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "streaming-runtime-udf-aggregator": "^1.0.6" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-go/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine3.14 2 | 3 | WORKDIR /app 4 | 5 | COPY go.mod ./ 6 | COPY go.sum ./ 7 | 8 | 9 | RUN mkdir -p /app/protos 10 | 11 | COPY ./protos/* /app/protos 12 | RUN ls /app 13 | RUN ls /app/protos 14 | 15 | RUN go mod download 16 | 17 | COPY *.go ./ 18 | 19 | RUN go build -o /udf-uppercase-go 20 | 21 | EXPOSE 55554 22 | 23 | CMD [ "/udf-uppercase-go" ] -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-js/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM node:18.4.0 4 | ENV NODE_ENV=production 5 | 6 | WORKDIR /app 7 | 8 | COPY ["package.json", "package-lock.json*", "./"] 9 | 10 | RUN npm install --production 11 | 12 | COPY generated /app/generated 13 | COPY proto /app/proto 14 | COPY aggregate.js . 
15 | COPY streaming-aggregator.js . 16 | 17 | CMD [ "node", "aggregate.js" ] -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/streaming-runtime-namespaced-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: streaming-runtime-namespaced-role-binding 5 | namespace: streaming-runtime 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: streaming-runtime-resource-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: streaming-runtime -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | 4 | ``` 5 | export PAT= 6 | echo $PAT | docker login ghcr.io --username --password-stdin 7 | 8 | ./mvnw spring-boot:build-image -Dspring-boot.build-image.imageName=ghcr.io/vmware-tanzu/streaming-runtimes/gaming-user-score -DskipTests 9 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/gaming-user-score:latest 10 | ``` 11 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | 4 | ``` 5 | export PAT= 6 | echo $PAT | docker login ghcr.io --username --password-stdin 7 | 8 | ./mvnw spring-boot:build-image -Dspring-boot.build-image.imageName=ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-java -DskipTests 9 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-java:latest 10 | ``` 11 | -------------------------------------------------------------------------------- 
/streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-kowl-ui-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka-kowl-ui 5 | labels: 6 | app: kafka-kowl-ui 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 80 11 | name: kafka-kowl-ui 12 | targetPort: 8080 13 | protocol: TCP 14 | 15 | selector: 16 | app: kafka-kowl-ui 17 | component: kafka-kowl-ui 18 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | 4 | ``` 5 | export PAT= 6 | echo $PAT | docker login ghcr.io --username --password-stdin 7 | 8 | ./mvnw spring-boot:build-image -Dspring-boot.build-image.imageName=ghcr.io/vmware-tanzu/streaming-runtimes/gaming-user-score -DskipTests 9 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/gaming-user-score:latest 10 | ``` 11 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6.1-partition-by-field-stateful-replication-time-window-aggregation.md: -------------------------------------------------------------------------------- 1 | ### 6.1 Partition by Field with replicated Time-Window aggregation 2 | 3 | Reliably scale out the time-window aggregation processors by ensuring inbound data partitioning on the same key. 
4 | 5 | Complete [documentation](https://vmware-tanzu.github.io/streaming-runtimes/architecture/processors/srp/time-window-aggregation/#partitioned-time-window-aggregation) 6 | 7 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-schema-registry-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: s-registry 5 | labels: 6 | app: kafka 7 | component: s-registry 8 | type: streaming-spike 9 | spec: 10 | ports: 11 | - name: registry 12 | port: 8081 13 | protocol: TCP 14 | type: NodePort 15 | selector: 16 | app: s-registry 17 | component: s-registry -------------------------------------------------------------------------------- /sr-common/src/test/java/com/tanzu/streaming/runtime/processor/common/AppTest.java: -------------------------------------------------------------------------------- 1 | package com.tanzu.streaming.runtime.processor.common; 2 | 3 | import static org.junit.Assert.assertTrue; 4 | 5 | import org.junit.Test; 6 | 7 | /** 8 | * Unit test for simple App. 
9 | */ 10 | public class AppTest 11 | { 12 | /** 13 | * Rigorous Test :-) 14 | */ 15 | @Test 16 | public void shouldAnswerWithTrue() 17 | { 18 | assertTrue( true ); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/streaming-runtime-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: streaming-runtime-cluster-role-binding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: streaming-runtime-cluster-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: streaming-runtime 12 | namespace: streaming-runtime 13 | # namespace: default 14 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-rabbitmq-cluster-stream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: ClusterStream 3 | metadata: 4 | name: my-rabbitmq-cluster-stream 5 | spec: 6 | name: "topicNme" 7 | keys: ["truckclass", "truckid"] 8 | streamModes: ["read", "write"] 9 | storage: 10 | server: 11 | url: "http://localhost:8080" 12 | protocol: "rabbitmq" 13 | attributes: 14 | key1: "value1" 15 | reclaimPolicy: "Retain" 16 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-kafka-cluster-stream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: ClusterStream 3 | metadata: 4 | name: my-kafka-cluster-stream 5 | spec: 6 | name: my-kafka-cluster-stream 7 | keys: ["truckclass", "truckid"] 8 | streamModes: ["read", "write"] 9 | storage: 10 | server: 11 | url: "http://localhost:8080" 12 | protocol: "kafka" 13 | attributes: 
14 | namespace: "streaming-runtime" 15 | reclaimPolicy: "Retain" 16 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/3-inline-transformation.md: -------------------------------------------------------------------------------- 1 | ## 3. Inline Data Transformation 2 | 3 | The SpEL expressions can be applied to transform the input payloads on the fly. 4 | 5 | - The spel.expression is applied on the inbound message and the result is used as outbound payload. 6 | - The output.headers expression extracts values from the inbound headers/payload and injects new key/value headers to the outbound messages: =<[payload.|header.]expression> 7 | 8 | Note: SRP specific only. 9 | -------------------------------------------------------------------------------- /streaming-runtime-samples/anomaly-detection/light/fraud-detection-udf-js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "fraud-detection-udf", 3 | "version": "1.0.0", 4 | "description": "JS aggregation function that computes the count of logging attempts per user", 5 | "main": "fraud-detector.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "ISC", 11 | "dependencies": { 12 | "streaming-runtime-udf-aggregator": "^1.0.6" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/5.1-partition-by-field(header-keys)-with-stateful-replication.md: -------------------------------------------------------------------------------- 1 | ### 5.1 Partition by Field using Header Keys 2 | 3 | Variation of the 5-partition-by-field-with-stateful-replication.yaml that uses message headers as partitioning keys. 4 | The 'spec.keys' value in the partitioned Stream must exist as a header name in the messages carried by that stream. 
5 | 6 | Also the 'data-in-stream' and 'team-scores-stream' Stream definitions are dropped in favor of auto-provisioned defaults. 7 | 8 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-rabbitmq-op-clusterstream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: ClusterStream 3 | metadata: 4 | name: test-rabbitmq-op-clusterstream 5 | spec: 6 | keys: ["truckclass", "truckid"] 7 | streamModes: ["read", "write"] 8 | storage: 9 | server: 10 | url: "http://localhost:8080" 11 | protocol: "rabbitmq" 12 | attributes: 13 | protocolAdapterName: "rabbitmq-operator-old" 14 | namespace: "streaming-runtime" 15 | reclaimPolicy: "Retain" 16 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/3-inline-transformation.yaml: -------------------------------------------------------------------------------- 1 | # 3. Inline (e.g. 
in SRP Processor) Data Transformation 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: inline-transformation 6 | spec: 7 | type: SRP 8 | inputs: 9 | - name: data-in 10 | outputs: 11 | - name: data-out 12 | attributes: 13 | srp.output.headers: "user=payload.fullName" 14 | srp.spel.expression: '''{"'' + #jsonPath(payload, ''$.fullName'') + ''":"'' + #jsonPath(payload, ''$.email'') + ''"}''' 15 | -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: streaming-runtime 4 | commonLabels: 5 | app.kubernetes.io/part-of: streaming-runtime 6 | 7 | resources: 8 | - streaming-runtime-namespace.yaml 9 | - cluster-stream-deployment.yaml 10 | - streaming-runtime-cluster-role.yaml 11 | - streaming-runtime-cluster-role-binding.yaml 12 | - streaming-runtime-namespaced-role.yaml 13 | - streaming-runtime-namespaced-role-binding.yaml 14 | - streaming-runtime-account.yaml -------------------------------------------------------------------------------- /sql-aggregator/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- 
/sr-common/.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-zk-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka-zk 5 | labels: 6 | app: kafka 7 | component: kafka-zk 8 | type: streaming-spike 9 | spec: 10 | ports: 11 | - name: client 12 | port: 2181 13 | protocol: TCP 14 | - name: follower 15 | port: 2888 16 | protocol: TCP 17 | - name: leader 18 | port: 3888 19 | protocol: TCP 20 | selector: 21 | app: kafka-zk 22 | component: kafka-zk -------------------------------------------------------------------------------- /srp-processor/.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | 
!**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /abbreviations.md: -------------------------------------------------------------------------------- 1 | 2 | *[SR]: Streaming Runtime 3 | *[SRP]: Streaming Runtime Processor 4 | *[SCS]: Spring Cloud Stream Processor 5 | *[FSQL]: Apache Flink Streaming SQL Processor 6 | *[CR]: Custom Resource 7 | *[CRs]: Custom Resources 8 | *[CRD]: Custom Resource Definition 9 | *[CRDs]: Custom Resource Definitions 10 | *[ETL]: Extract Transform Load 11 | *[ELT]: Extract Load Transform 12 | *[UDF]: User Defined Function 13 | *[UDFs]: User Defined Functions 14 | *[SCF]: Spring Cloud Function 15 | *[SQL]: Structured Query Language 16 | *[CP]: Control Plane 17 | *[DP]: Data Plane -------------------------------------------------------------------------------- /streaming-runtime-operator/.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /sql-aggregator/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | %d{ISO8601} %5p %t %c{2}:%L - %m%n 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- 
/streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka 5 | labels: 6 | app: kafka 7 | component: kafka-broker 8 | type: streaming-spike 9 | spec: 10 | ports: 11 | - port: 9092 12 | name: kafka-port 13 | targetPort: 9092 14 | protocol: TCP 15 | - port: 9094 16 | name: external-kafka-port 17 | targetPort: 9094 18 | protocol: TCP 19 | type: NodePort 20 | selector: 21 | app: kafka 22 | component: kafka-broker 23 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/rabbitmq/rabbitmq-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rabbitmq 5 | labels: 6 | app: rabbitmq 7 | component: rabbitmq-broker 8 | type: streaming-spike 9 | spec: 10 | ports: 11 | - port: 5672 12 | targetPort: 5672 13 | protocol: TCP 14 | name: rabbitmq-port 15 | - port: 15672 16 | targetPort: 15672 17 | protocol: TCP 18 | name: rabbitmq-mgmt-port 19 | selector: 20 | app: rabbitmq 21 | component: rabbitmq-broker -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/processor/generic-streaming-runtime-processor-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: streaming-runtime-processor 5 | labels: 6 | app: streaming-runtime-processor 7 | component: streaming-runtime-processor 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: streaming-runtime-processor 13 | template: 14 | metadata: 15 | labels: 16 | app: streaming-runtime-processor 17 | component: 
streaming-runtime-processor 18 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-rabbitmq-op-clusterstream2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: ClusterStream 3 | metadata: 4 | name: test-rabbitmq-op-clusterstream 5 | spec: 6 | name: my-exchange 7 | keys: ["truckclass", "truckid"] 8 | streamModes: ["read", "write"] 9 | storage: 10 | server: 11 | url: "http://localhost:8080" 12 | protocol: "rabbitmq" 13 | binding: service-binding-ref 14 | attributes: 15 | protocolAdapterName: "rabbitmq-operator" 16 | namespace: "streaming-runtime" 17 | reclaimPolicy: "Retain" 18 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-go/protos/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tanzu/streaming-runtimes/udf-uppercase-go/message 2 | 3 | go 1.17 4 | 5 | require ( 6 | google.golang.org/grpc v1.40.0 7 | google.golang.org/protobuf v1.27.1 8 | ) 9 | 10 | require ( 11 | github.com/golang/protobuf v1.5.0 // indirect 12 | golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect 13 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd // indirect 14 | golang.org/x/text v0.3.0 // indirect 15 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect 16 | ) 17 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### 
IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | .settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/3.1-polyglot-udf-transformation.yaml: 
-------------------------------------------------------------------------------- 1 | # 3.1 Polyglot UDF Transformation 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: udf-transformation 6 | spec: 7 | type: SRP 8 | inputs: 9 | - name: data-in 10 | outputs: 11 | - name: data-out 12 | attributes: 13 | srp.grpcPort: "50051" 14 | template: 15 | spec: 16 | containers: 17 | - name: uppercase-grpc-python 18 | # Runs GRPC server on port 50051 19 | image: ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-python:0.1 20 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2022 VMware, Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
-------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-python/protos/MessageService.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option java_multiple_files = true; 3 | package org.springframework.cloud.function.grpc; 4 | 5 | message GrpcMessage { 6 | bytes payload = 1; 7 | map headers = 2; 8 | } 9 | 10 | service MessagingService { 11 | rpc biStream(stream GrpcMessage) returns (stream GrpcMessage); 12 | 13 | rpc clientStream(stream GrpcMessage) returns (GrpcMessage); 14 | 15 | rpc serverStream(GrpcMessage) returns (stream GrpcMessage); 16 | 17 | rpc requestReply(GrpcMessage) returns (GrpcMessage); 18 | } -------------------------------------------------------------------------------- /tools.mk: -------------------------------------------------------------------------------- 1 | ifndef TOOLS_MK # Prevent repeated "-include". 2 | TOOLS_MK := $(lastword $(MAKEFILE_LIST)) 3 | TOOLS_INCLUDE_DIR := $(dir $(TOOLS_MK)) 4 | 5 | # Define the tools here 6 | tools.path := $(abspath $(build_dir)/tools) 7 | tools.bin.path := $(abspath $(tools.path)/bin) 8 | GSUTIL := $(tools.path)/gsutil/gsutil 9 | 10 | # TODO: Change this with the githubway to publish things 11 | $(GSUTIL): 12 | @mkdir -p $(@D) 13 | curl -sL https://storage.googleapis.com/pub/gsutil.tar.gz | tar -xz -C $(tools.path) 14 | 15 | tools.clean: 16 | $(RM) -rf $(tools.path) 17 | 18 | clean .PHONY: tools.clean 19 | 20 | endif 21 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/.gitignore: -------------------------------------------------------------------------------- 1 | HELP.md 2 | target/ 3 | !.mvn/wrapper/maven-wrapper.jar 4 | !**/src/main/**/target/ 5 | !**/src/test/**/target/ 6 | 7 | ### STS ### 8 | .apt_generated 9 | .classpath 10 | .factorypath 11 | .project 12 | 
.settings 13 | .springBeans 14 | .sts4-cache 15 | 16 | ### IntelliJ IDEA ### 17 | .idea 18 | *.iws 19 | *.iml 20 | *.ipr 21 | 22 | ### NetBeans ### 23 | /nbproject/private/ 24 | /nbbuild/ 25 | /dist/ 26 | /nbdist/ 27 | /.nb-gradle/ 28 | build/ 29 | !**/src/main/**/build/ 30 | !**/src/test/**/build/ 31 | 32 | ### VS Code ### 33 | .vscode/ 34 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/2.1-multibiner-bridge-production-mode.md: -------------------------------------------------------------------------------- 1 | ### 2.1 Multibinder Bridge - production env 2 | 3 | In production environment the Streaming Runtime will not be allowed to auto-provision the messaging brokers dynamically. 4 | Instead the Administrator will provision the required messaging middleware and declare ClusterStream to provide managed and controlled access to it. 5 | 6 | The ClusterStreams and the Streams follow the PersistentVolume model: 7 | namespaced Stream declared by a developer (ala PVC) is backed by a ClusterStream resource (ala PV) which is controlled and provisioned by the administrator. 
8 | -------------------------------------------------------------------------------- /udf-utilities/MessageService.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option java_multiple_files = true; 3 | package org.springframework.cloud.function.grpc; 4 | 5 | message GrpcMessage { 6 | bytes payload = 1; 7 | map headers = 2; 8 | } 9 | 10 | message GrpcPayloadCollection { 11 | repeated bytes payload = 1; 12 | } 13 | 14 | service MessagingService { 15 | rpc biStream(stream GrpcMessage) returns (stream GrpcMessage); 16 | 17 | rpc clientStream(stream GrpcMessage) returns (GrpcMessage); 18 | 19 | rpc serverStream(GrpcMessage) returns (stream GrpcMessage); 20 | 21 | rpc requestReply(GrpcMessage) returns (GrpcMessage); 22 | } -------------------------------------------------------------------------------- /streaming-runtime-operator/skaffold-dev.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: skaffold/v2beta21 2 | kind: Config 3 | 4 | build: 5 | tagPolicy: 6 | customTemplate: 7 | template: "0.0.4-SNAPSHOT" 8 | artifacts: 9 | - image: ghcr.io/vmware-tanzu/streaming-runtimes/streaming-runtime 10 | custom: 11 | buildCommand: | 12 | ./mvnw clean install -P devtools -DskipTests spring-boot:build-image 13 | deploy: 14 | kubectl: 15 | manifests: 16 | - ./crds/cluster-stream-crd.yaml 17 | - ./crds/stream-crd.yaml 18 | - ./crds/processor-crd.yaml 19 | flags: 20 | apply: 21 | - --force 22 | kustomize: 23 | paths: 24 | - manifests -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/rabbitmq/rabbitmq-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rabbitmq-broker 5 | labels: 6 | app: rabbitmq 7 | type: streaming-spike 8 | spec: 9 | 
replicas: 1 10 | selector: 11 | matchLabels: 12 | app: rabbitmq 13 | template: 14 | metadata: 15 | labels: 16 | app: rabbitmq 17 | component: rabbitmq-broker 18 | spec: 19 | containers: 20 | - name: rabbitmq 21 | image: rabbitmq:3-management 22 | ports: 23 | - containerPort: 15672 24 | - containerPort: 5672 -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/3.2-scs-transformation.md: -------------------------------------------------------------------------------- 1 | ### 3.2 SCS (Spring Cloud Stream) Transformation 2 | 3 | Any [Spring Cloud Stream](https://spring.io/projects/spring-cloud-stream) or [Spring Cloud Function](https://spring.io/projects/spring-cloud-function) application can be run as Processor. 4 | Just build a container image for the application and run it as `spec.type: SCS`` Processor type. 5 | 6 | Spring Cloud DataFlow provides [60+ pre-built SCS/SCF applications](https://docs.spring.io/stream-applications/docs/2021.1.2/reference/html/#applications) that can be used Out-Of-The-Box 7 | 8 | Use the environment variables to configure the Spring application. 9 | 10 | -------------------------------------------------------------------------------- /version.mk: -------------------------------------------------------------------------------- 1 | # After including this file, add the following line to have the version printed 2 | # when invoking the `version` target: 3 | # $(eval $(call VERSION_template,target_name,path_to_VERSION_file)) 4 | # To override any of the values, redefine it after the above line. 
5 | define VERSION_template 6 | $(1).version.release := $(shell cat $(2)/VERSION) 7 | $(1).version.dev := dev-$(build_time) 8 | $(1).version.commit := dev-$(git.commit) 9 | $(1).version.branch := dev-$(subst /,_,$(git.branch)) 10 | $(1).version.latest := latest 11 | $(1).version = $$($(1).version.$(build)) 12 | $(1).version: 13 | @echo $(1): $$($(1).version) 14 | 15 | version .PHONY: $(1).version 16 | endef 17 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-js/proto/MessageService.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option java_multiple_files = true; 3 | package org.springframework.cloud.function.grpc; 4 | 5 | message GrpcMessage { 6 | bytes payload = 1; 7 | map headers = 2; 8 | } 9 | 10 | message GrpcPayloadCollection { 11 | repeated bytes payload = 1; 12 | } 13 | 14 | service MessagingService { 15 | rpc biStream(stream GrpcMessage) returns (stream GrpcMessage); 16 | 17 | rpc clientStream(stream GrpcMessage) returns (GrpcMessage); 18 | 19 | rpc serverStream(GrpcMessage) returns (stream GrpcMessage); 20 | 21 | rpc requestReply(GrpcMessage) returns (GrpcMessage); 22 | } -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-go/protos/MessageService.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | option java_multiple_files = true; 3 | package org.springframework.cloud.function.grpc; 4 | option go_package = "github.com/tanzu/streaming-runtimes/udf-uppercase-go/message"; 5 | 6 | message GrpcMessage { 7 | bytes payload = 1; 8 | map headers = 2; 9 | } 10 | 11 | service MessagingService { 12 | rpc biStream(stream GrpcMessage) returns (stream GrpcMessage); 13 | 14 | rpc clientStream(stream GrpcMessage) returns (GrpcMessage); 15 | 16 | rpc serverStream(GrpcMessage) 
returns (stream GrpcMessage); 17 | 18 | rpc requestReply(GrpcMessage) returns (GrpcMessage); 19 | } -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-user-score-aggregation-js/README.md: -------------------------------------------------------------------------------- 1 | # Online Game Team Statistics Aggregation Processor 2 | 3 | ### Build 4 | 5 | As prerequisite you need `node` and `npm` installed. 6 | 7 | Run `npm install ` to install required `node_modules`. Later are not committed in Git. 8 | 9 | To test the processor locally run: 10 | 11 | ``` 12 | node aggregator.js 13 | ``` 14 | ### Build gaming-team-score Image 15 | 16 | 17 | ``` 18 | export PAT= 19 | echo $PAT | docker login ghcr.io --username --password-stdin 20 | 21 | docker build --tag ghcr.io/vmware-tanzu/streaming-runtimes/user-score-js . 22 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/user-score-js:latest 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6.1-team-score-aggregation-js/README.md: -------------------------------------------------------------------------------- 1 | # Online Game Team Statistics Aggregation Processor 2 | 3 | ### Build 4 | 5 | As prerequisite you need `node` and `npm` installed. 6 | 7 | Run `npm install ` to install required `node_modules`. Later are not committed in Git. 8 | 9 | To test the processor locally run: 10 | 11 | ``` 12 | node aggregator.js 13 | ``` 14 | ### Build gaming-team-score Image 15 | 16 | 17 | ``` 18 | export PAT= 19 | echo $PAT | docker login ghcr.io --username --password-stdin 20 | 21 | docker build --tag ghcr.io/vmware-tanzu/streaming-runtimes/team-score-js . 
22 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/team-score-js:latest 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-java/src/main/java/net/tzolov/poc/uppercasegrpc/UppercaseGrpcApplication.java: -------------------------------------------------------------------------------- 1 | package net.tzolov.poc.uppercasegrpc; 2 | 3 | import java.util.function.Function; 4 | 5 | import org.springframework.boot.SpringApplication; 6 | import org.springframework.boot.autoconfigure.SpringBootApplication; 7 | import org.springframework.context.annotation.Bean; 8 | 9 | @SpringBootApplication 10 | public class UppercaseGrpcApplication { 11 | 12 | public static void main(String[] args) { 13 | SpringApplication.run(UppercaseGrpcApplication.class, args); 14 | } 15 | 16 | @Bean 17 | public Function uppercase() { 18 | return String::toUpperCase; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/streaming-runtime-namespaced-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: streaming-runtime-resource-role 5 | namespace: streaming-runtime 6 | rules: 7 | - apiGroups: 8 | - apps 9 | resources: 10 | - deployments 11 | - statefulsets 12 | - statefulsets/scale 13 | verbs: 14 | - get 15 | - list 16 | - create 17 | - patch 18 | - watch 19 | - update 20 | - delete 21 | - apiGroups: 22 | - "" # "" indicates the core API group 23 | resources: 24 | - configmaps 25 | - services 26 | - pods 27 | verbs: 28 | - list 29 | - create 30 | - update 31 | - watch -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-team-score/README.md: 
-------------------------------------------------------------------------------- 1 | # Online Game Team Statistics Aggregation Processor 2 | 3 | ### Build 4 | 5 | As prerequisite you need `node` and `npm` installed. 6 | 7 | Run `npm install ` to install required `node_modules`. Later are not committed in Git. 8 | 9 | To test the processor locally run: 10 | 11 | ``` 12 | node aggregator.js 13 | ``` 14 | ### Build gaming-team-score Image 15 | 16 | 17 | ``` 18 | export PAT= 19 | echo $PAT | docker login ghcr.io --username --password-stdin 20 | 21 | docker build --tag ghcr.io/vmware-tanzu/streaming-runtimes/gaming-team-score . 22 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/gaming-team-score:latest 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-processor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Processor 3 | metadata: 4 | name: processor1 5 | spec: 6 | inputs: 7 | sources: 8 | - name: "kafka-stream-1" 9 | outputs: 10 | - name: "rabbitmq-stream-1" 11 | template: 12 | spec: 13 | containers: 14 | - name: uppercase-grpc 15 | image: ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-java:latest 16 | env: 17 | - name: SPRING_CLOUD_FUNCTION_DEFINITION 18 | value: uppercase 19 | - name: SPRING_CLOUD_FUNCTION_GRPC_MODE 20 | value: server 21 | - name: SPRING_CLOUD_FUNCTION_GRPC_PORT 22 | value: "55554" 23 | 24 | -------------------------------------------------------------------------------- /streaming-runtime-operator/skaffold.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: skaffold/v2beta21 2 | kind: Config 3 | 4 | build: 5 | tagPolicy: 6 | customTemplate: 7 | template: "0.0.4-SNAPSHOT" 8 | artifacts: 9 | - image: ghcr.io/vmware-tanzu/streaming-runtimes/streaming-runtime 10 | custom: 11 | buildCommand: | 12 | ./mvnw 
clean install -Dnative -DskipTests spring-boot:build-image && \ 13 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/streaming-runtime 14 | deploy: 15 | kubectl: 16 | manifests: 17 | - ./crds/cluster-stream-crd.yaml 18 | - ./crds/stream-crd.yaml 19 | - ./crds/processor-crd.yaml 20 | flags: 21 | apply: 22 | - --force 23 | kustomize: 24 | paths: 25 | - manifests -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-team-score/aggregate.js: -------------------------------------------------------------------------------- 1 | const udf = require('streaming-runtime-udf-aggregator'); 2 | 3 | // --------- UDF aggregation function -------- 4 | function aggregate(headers, user, results) { 5 | 6 | if (!results.has(user.team)) { 7 | // Add new empty team to the result map 8 | results.set(user.team, { 9 | from: headers.windowStartTime, 10 | to: headers.windowEndTime, 11 | team: user.team, 12 | totalScore: 0, 13 | }); 14 | } 15 | 16 | // Increment team's score. 17 | let team = results.get(user.team); 18 | 19 | team.totalScore = 20 | Number.parseInt(team.totalScore) + Number.parseInt(user.userTotalScore); 21 | } 22 | 23 | new udf.Aggregator(aggregate).start(); 24 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6.1-team-score-aggregation-js/aggregate.js: -------------------------------------------------------------------------------- 1 | const udf = require('streaming-runtime-udf-aggregator'); 2 | 3 | // --------- UDF aggregation function -------- 4 | function aggregate(headers, user, results) { 5 | 6 | if (!results.has(user.team)) { 7 | // Add new empty team aggregate to the result map 8 | results.set(user.team, { 9 | from: headers.windowStartTime, 10 | to: headers.windowEndTime, 11 | team: user.team, 12 | totalScore: 0, 13 | }); 14 | } 15 | 16 | // Increment team's score. 
17 | let team = results.get(user.team); 18 | 19 | team.totalScore = 20 | Number.parseInt(team.totalScore) + Number.parseInt(user.score); 21 | } 22 | 23 | new udf.Aggregator(aggregate).start(); 24 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/processor/srp-processor-container-template.yaml: -------------------------------------------------------------------------------- 1 | name: srp-processor 2 | image: ghcr.io/vmware-tanzu/streaming-runtimes/srp-processor:latest 3 | ports: 4 | - containerPort: 8080 5 | env: 6 | - name: SPRING_CLOUD_FUNCTION_GRPC_SERVER 7 | value: "false" 8 | - name: SPRING_CLOUD_FUNCTION_DEFINITION 9 | value: "proxy" 10 | - name: LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_CLOUD_STREAM_BINDER 11 | value: "ERROR" 12 | livenessProbe: 13 | httpGet: 14 | path: /actuator/health 15 | port: 8080 16 | initialDelaySeconds: 10 17 | periodSeconds: 60 18 | readinessProbe: 19 | httpGet: 20 | path: /actuator/info 21 | port: 8080 22 | initialDelaySeconds: 10 23 | periodSeconds: 10 -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-go/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tanzu/streaming-runtimes/udf-uppercase-go 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/tanzu/streaming-runtimes/udf-uppercase-go/message v0.0.0-00010101000000-000000000000 7 | google.golang.org/grpc v1.40.0 8 | ) 9 | 10 | require ( 11 | github.com/golang/protobuf v1.5.2 // indirect 12 | golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect 13 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd // indirect 14 | golang.org/x/text v0.3.0 // indirect 15 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect 16 | google.golang.org/protobuf v1.27.1 // indirect 17 | ) 18 | 19 | replace 
github.com/tanzu/streaming-runtimes/udf-uppercase-go/message => ./protos/ 20 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/KubernetesComponentTest.java: -------------------------------------------------------------------------------- 1 | package com.vmware.tanzu.streaming.runtime; 2 | 3 | import java.lang.annotation.ElementType; 4 | import java.lang.annotation.Inherited; 5 | import java.lang.annotation.Retention; 6 | import java.lang.annotation.RetentionPolicy; 7 | import java.lang.annotation.Target; 8 | 9 | import org.junit.jupiter.api.TestInstance; 10 | import org.junit.jupiter.api.TestInstance.Lifecycle; 11 | import org.junit.jupiter.api.extension.ExtendWith; 12 | 13 | @Target(ElementType.TYPE) 14 | @Retention(RetentionPolicy.RUNTIME) 15 | @Inherited 16 | @TestInstance(Lifecycle.PER_CLASS) 17 | @ExtendWith(LocalClusterExtension.class) 18 | public @interface KubernetesComponentTest { 19 | 20 | } -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-user-score-aggregation-js/aggregate.js: -------------------------------------------------------------------------------- 1 | const udf = require('streaming-runtime-udf-aggregator'); 2 | 3 | // --------- UDF aggregation function -------- 4 | function aggregate(headers, user, results) { 5 | 6 | if (!results.has(user.fullName)) { 7 | // Add new empty user aggregate to the result map 8 | results.set(user.fullName, { 9 | from: headers.windowStartTime, 10 | to: headers.windowEndTime, 11 | name: user.fullName, 12 | totalScore: 0, 13 | }); 14 | } 15 | 16 | // Increment user's score. 
17 | let userAggregate = results.get(user.fullName); 18 | 19 | userAggregate.totalScore = 20 | Number.parseInt(userAggregate.totalScore) + Number.parseInt(user.score); 21 | } 22 | 23 | new udf.Aggregator(aggregate).start(); 24 | -------------------------------------------------------------------------------- /docs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | ARG USER=1001 4 | 5 | RUN adduser -h /usr/src/mkdocs -D -u $USER mkdocs \ 6 | && apk add bash \ 7 | && apk add git 8 | 9 | ENV PATH="${PATH}:/usr/src/mkdocs/.local/bin" 10 | 11 | USER mkdocs 12 | RUN mkdir -p /usr/src/mkdocs/build 13 | WORKDIR /usr/src/mkdocs/build 14 | 15 | RUN pip install --upgrade pip 16 | 17 | RUN pip install pymdown-extensions \ 18 | && pip install mkdocs \ 19 | && pip install mkdocs-material \ 20 | && pip install mkdocs-rtd-dropdown \ 21 | && pip install mkdocs-git-revision-date-plugin \ 22 | && pip install mkdocs-git-revision-date-localized-plugin 23 | 24 | COPY ./streaming-runtime-samples ./streaming-runtime-samples 25 | COPY ./material ./material 26 | COPY mkdocs.yml . 
27 | 28 | EXPOSE 8000 29 | 30 | ENTRYPOINT ["mkdocs", "serve"] 31 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-zk-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kafka-zk 5 | labels: 6 | app: kafka 7 | component: kafka-zk 8 | type: streaming-spike 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: kafka-zk 14 | template: 15 | metadata: 16 | labels: 17 | app: kafka-zk 18 | component: kafka-zk 19 | spec: 20 | containers: 21 | - name: kafka-zk 22 | image: digitalwonderland/zookeeper 23 | ports: 24 | - containerPort: 2181 25 | env: 26 | - name: ZOOKEEPER_ID 27 | value: "1" 28 | - name: ZOOKEEPER_SERVER_1 29 | value: kafka-zk -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/8-secrets-management-with-service-binding.md: -------------------------------------------------------------------------------- 1 | ## 8. Secretes Management with Service Binding spec 2 | 3 | We can use the RabbitMQ Cluster Operator to provision our RabbitMQ Cluster and the ServiceBinding Operator to share the RabbitMQ credentials with the processors that would need to connect to it. 
4 | 5 | Prerequisites: 6 | 7 | - Service Binding Operator: https://vmware-tanzu.github.io/streaming-runtimes/install/#optional-install-service-binding-operator 8 | - RabbitMQ Cluster and Message Topology Operators: https://vmware-tanzu.github.io/streaming-runtimes/install/#optional-install-rabbitmq-cluster-and-message-topology-operators 9 | 10 | Complete [service binding documentation](https://vmware-tanzu.github.io/streaming-runtimes/architecture/service-binding/service-binding/) 11 | -------------------------------------------------------------------------------- /ARCHIVED.md: -------------------------------------------------------------------------------- 1 | # Archived Status 2 | 3 | VMware is pausing efforts to continue the development of the Streaming Runtime Operator. This effort was bigger than we expected with the resources available at hand, and our hope is to continue the development of the Streaming Runtime Operator when the team and contributors grow. 4 | 5 | ## Where do we stop? 6 | 7 | With the current implementation of the Streaming Runtime project, you can build a simple streaming pipeline using the `Processor` and `Stream` resources provided by the operator. Learn more about what you do with them in the [Usage](https://vmware-tanzu.github.io/streaming-runtimes/streaming-runtime-overview/) section. 8 | 9 | ## How to reach the team 10 | 11 | You are welcome to create an issue in this repository or ask a question on [Slack](https://kubernetes.slack.com/archives/C03GFRBHM43). 12 | -------------------------------------------------------------------------------- /streaming-runtime-samples/anomaly-detection/light/fraud-detection-udf-js/README.md: -------------------------------------------------------------------------------- 1 | 2 | This JS UDF uses the https://www.npmjs.com/package/streaming-runtime-udf-aggregator package to implement the message aggregation. 
3 | 4 | ### Build Docker image 5 | 6 | ``` 7 | export PAT= 8 | echo $PAT | docker login ghcr.io --username --password-stdin 9 | 10 | docker build --tag ghcr.io/vmware-tanzu/streaming-runtimes/udf-anomaly-detection-js . 11 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/udf-anomaly-detection-js:latest 12 | ``` 13 | 14 | you can run the image locally: 15 | 16 | ``` 17 | docker run -p 50051:50051 udf-anomaly-detection-js:latest 18 | ``` 19 | 20 | ### Test locally 21 | 22 | Install the modules: 23 | ``` 24 | npm install 25 | ``` 26 | 27 | and run locally: 28 | ``` 29 | node fraud-detector.js 30 | ``` -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/processor/statefulset-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: streaming-runtime-processor 5 | labels: 6 | app: streaming-runtime-processor 7 | component: streaming-runtime-processor 8 | spec: 9 | serviceName: "streaming-runtime-processor" 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: streaming-runtime-processor 14 | template: 15 | metadata: 16 | labels: 17 | app: streaming-runtime-processor 18 | component: streaming-runtime-processor 19 | volumeClaimTemplates: 20 | - metadata: 21 | name: config 22 | spec: 23 | accessModes: [ "ReadWriteOnce" ] 24 | storageClassName: standard 25 | resources: 26 | requests: 27 | storage: 0.1Gi 28 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/processor/sql-aggregation-container-template.yaml: -------------------------------------------------------------------------------- 1 | name: sql-aggregator 2 | image: ghcr.io/vmware-tanzu/streaming-runtimes/sql-aggregator:latest 3 | resources: 4 | limits: 5 | memory: "4Gi" 6 | requests: 7 | memory: 
"2Gi" 8 | volumeMounts: 9 | - name: config 10 | mountPath: /config 11 | ports: 12 | - containerPort: 8080 13 | - containerPort: 8089 14 | # - containerPort: 5006 15 | # name: jdwp 16 | # protocol: TCP 17 | env: 18 | - name: MANAGEMENT_ENDPOINT_HEALTH_SHOW-DETAILS 19 | value: "ALWAYS" 20 | - name: MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE 21 | value: "*" 22 | - name: SPRING_CONFIG_LOCATION 23 | value: "file:/config/application.yaml" 24 | # - name: JAVA_TOOL_OPTIONS 25 | # value: -agentlib:jdwp=transport=dt_socket,server=y,address=5006,suspend=y,quiet=y 26 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-time-window-aggregation.md: -------------------------------------------------------------------------------- 1 | ## 6. Tumbling Time-Window Aggregation 2 | 3 | The (SRP) processor supports Tumbling Time-Window Aggregation. 4 | The 'srp.window' attribute defines the window interval. 5 | The processor collects inbound messages into time-window groups based on the event-time computed for every message. 6 | The event-time is computed from message's payload or header metadata. 7 | The inbound Stream 'spec.dataSchemaContext.timeAttributes' defines which payload field (or header attribute) to be used as an Event-Time. 8 | Furthermore the Watermark expression allows configuring out-of-orderness. 9 | When no event-time is configured the processor defaults to the less reliable process-time as event-time. 
10 | 11 | Complete [Tumbling Time-Window](https://vmware-tanzu.github.io/streaming-runtimes/architecture/processors/srp/time-window-aggregation) documentation 12 | 13 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-python/message_service_client.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import logging 3 | import sys 4 | 5 | import grpc 6 | 7 | import MessageService_pb2 8 | import MessageService_pb2_grpc 9 | 10 | 11 | def run(input_text): 12 | with grpc.insecure_channel('localhost:50051') as channel: 13 | stub = MessageService_pb2_grpc.MessagingServiceStub(channel) 14 | message_to_send = MessageService_pb2.GrpcMessage(payload=str.encode(input_text)) 15 | message_to_send.headers["Hi"] = "Oleg" 16 | response = stub.requestReply(message_to_send) 17 | 18 | print("Client received Payload: %s and Headers: %s" % (response.payload.decode(), response.headers)) 19 | 20 | 21 | if __name__ == '__main__': 22 | logging.basicConfig() 23 | if len(sys.argv) > 1: 24 | msg = sys.argv[1] 25 | else: 26 | msg = 'default test message' 27 | run(msg) 28 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/3.1-polyglot-udf-transformation.md: -------------------------------------------------------------------------------- 1 | ### 3.1 Polyglot UDF Transformation 2 | 3 | The (SRP) Processor can be assigned with custom User Defined Function running in a sidecar container next to the processor in the same Pod. 4 | Processor calls the UDF either for every received message or in the case of temporal aggregation calls it once the aggregate is ready. 5 | 6 | The communication between the Processor and the custom UDF is performed over gRPC using well defined Protocol Buffer contract. 
7 | Because the Protocol Buffers are language-neutral, this allows implementing the UDF in any language of choice! (e.g. polyglot UDF). 8 | 9 | Detailed UDF documentation: https://vmware-tanzu.github.io/streaming-runtimes/architecture/processors/srp/udf-overview/ 10 | 11 | Note: The inline transformations can be applied on the outbound message (e.g. the UDF response) before it is sent. 12 | 13 | Note: SRP specific feature. 14 | -------------------------------------------------------------------------------- /docs/assets/images/github.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/1-message-retransmission.md: -------------------------------------------------------------------------------- 1 | ## 1. Message Retransmission 2 | 3 | Processor re-transmits, unchanged, the events/messages received from the input (data-in) Stream into the output (data-out) Stream. 4 | The data-in and data-out Stream (CRD) resources are auto-provisioned using the runtime operator defaults, such a Kafka as a default protocol. 5 | The Stream resources, in turn, auto-provision their ClusterStreams (CRD) applying the '-cluster-stream' naming convention. 6 | Finally the ClusterStream controllers will provision the required brokers for the target protocols (e.g. Kafka, RabbitMQ...). 7 | 8 | Currently 3 processor types are supported (if omitted is defaults to SRP): 9 | 10 | - SRP (default) - time-windowed, side-car UDF processor. 11 | - SCS - Spring Cloud Stream/Function processor. 12 | - FSQL - Apache Flink (inline) streaming SQL processor. 13 | 14 | One can combine multiple different processor types in the same data pipelines. 
15 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/2-multibiner-bridge.yaml: -------------------------------------------------------------------------------- 1 | # 2. Multibinder (e.g. multi-message brokers) Bridge 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Stream 4 | metadata: 5 | name: data-in-stream 6 | spec: 7 | name: data-in 8 | protocol: "kafka" 9 | --- 10 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 11 | kind: Processor 12 | metadata: 13 | name: multibinder-processor 14 | spec: 15 | type: SRP 16 | inputs: 17 | - name: data-in-stream 18 | outputs: 19 | - name: data-out-stream 20 | --- 21 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 22 | kind: Stream 23 | metadata: 24 | name: data-out-stream 25 | spec: 26 | name: data-out 27 | protocol: "rabbitmq" 28 | # attributes: 29 | # protocolAdapterName: "rabbitmq-operator" 30 | # 31 | # Prerequisites to provision RabbitMQ clusters with the the "rabbitmq-operator": 32 | # https://vmware-tanzu.github.io/streaming-runtimes/install/#optional-install-rabbitmq-cluster-and-message-topology-operators 33 | # 34 | -------------------------------------------------------------------------------- /sql-aggregator/README.md: -------------------------------------------------------------------------------- 1 | # SQL Streaming Aggregator 2 | 3 | ## Overview 4 | Spring Boot app that embeds a Apache Flink cluster (e.g. Flink local environment execution). 5 | 6 | The `SqlAggregatorApplicationProperties#executeSql` property defines the list of SQL statements to be executed. 7 | The SQL statements are executed in the order of their definition! 8 | 9 | If the `SqlAggregatorApplicationProperties#continuousQuery` property is set the corresponding 10 | SQL statement is executed as a last statement and the query result is printed on the standard output. 11 | This is useful to run continuous SQL query. 
12 | 13 | 14 | ## Music Charts Demo 15 | The Music charts application demo defines these SQL statements: [application.properties](./src/main/resources/application.properties). 16 | 17 | ## Build/push docker image 18 | ``` 19 | ./mvnw clean install 20 | docker build -t ghcr.io/vmware-tanzu/streaming-runtimes/sql-aggregator:latest . 21 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/sql-aggregator:latest 22 | ``` -------------------------------------------------------------------------------- /streaming-runtime-operator/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4 | 4.0.0 5 | 6 | org.springframework.boot 7 | spring-boot-starter-parent 8 | 2.6.1 9 | 10 | 11 | com.vmware.tanzu.streaming 12 | streaming-runtime-operator 13 | 0.0.1-SNAPSHOT 14 | streaming-runtime-operator 15 | Streaming-Runtime Kubernetes Operator 16 | pom 17 | 18 | streaming-runtime 19 | 20 | 21 | 11 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /CLA.md: -------------------------------------------------------------------------------- 1 | ## Contributor License Agreement 2 | 3 | All contributors to this project must have a signed Contributor License Agreement (“CLA”) on file with us. The CLA grants us the permissions we need to use and redistribute your contributions as part of the project; you or your employer retain the copyright to your contribution. Before a PR can pass all required checks, our CLA action will prompt you to accept the agreement. Head over to https://cla.vmware.com/ to see your current agreement(s) on file or to sign a new one. 4 | 5 | We generally only need you (or your employer) to sign our CLA once and once signed, you should be able to submit contributions to any VMware project. 6 | 7 | Note: a signed CLA is required even for minor updates. 
If you see something trivial that needs to be fixed, but are unable or unwilling to sign a CLA, the maintainers will be happy to make the change on your behalf. If you can describe the change in a [bug report](https://github.com/vmware-tanzu/community-edition/issues/new/choose), it would be greatly appreciated. -------------------------------------------------------------------------------- /sr-common/src/main/java/com/tanzu/streaming/runtime/processor/common/proto/GrpcPayloadCollectionOrBuilder.java: -------------------------------------------------------------------------------- 1 | // Generated by the protocol buffer compiler. DO NOT EDIT! 2 | // source: payload_collection.proto 3 | 4 | package com.tanzu.streaming.runtime.processor.common.proto; 5 | 6 | public interface GrpcPayloadCollectionOrBuilder extends 7 | // @@protoc_insertion_point(interface_extends:GrpcPayloadCollection) 8 | com.google.protobuf.MessageOrBuilder { 9 | 10 | /** 11 | * repeated bytes payload = 1; 12 | * @return A list containing the payload. 13 | */ 14 | java.util.List getPayloadList(); 15 | /** 16 | * repeated bytes payload = 1; 17 | * @return The count of payload. 18 | */ 19 | int getPayloadCount(); 20 | /** 21 | * repeated bytes payload = 1; 22 | * @param index The index of the element to return. 23 | * @return The payload at the given index. 
from concurrent import futures
import logging

import grpc

import MessageService_pb2
import MessageService_pb2_grpc


class MessageService(MessageService_pb2_grpc.MessagingServiceServicer):
    """gRPC servicer that upper-cases the payload of each incoming message."""

    def requestReply(self, request, context):
        # Log the inbound message, then echo it back upper-cased with the same headers.
        print("Server received Payload: %s and Headers: %s" % (request.payload.decode(), request.headers))
        transformed = request.payload.decode().upper().encode()
        return MessageService_pb2.GrpcMessage(payload=transformed, headers=request.headers)


def serve():
    """Start the gRPC server on port 50051 and block until termination."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    MessageService_pb2_grpc.add_MessagingServiceServicer_to_server(MessageService(), grpc_server)
    grpc_server.add_insecure_port('[::]:50051')
    grpc_server.start()
    grpc_server.wait_for_termination()


if __name__ == '__main__':
    logging.basicConfig()
    print("gRPC server started on port: 50051 ...")
    serve()
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.tanzu.streaming.runtime.processor.common.avro; 18 | 19 | import org.apache.avro.generic.GenericRecord; 20 | 21 | import org.springframework.messaging.Message; 22 | 23 | public interface AvroMessageReader { 24 | 25 | GenericRecord toGenericRecord(Messagemessage); 26 | 27 | } 28 | -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/cluster-stream-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: streaming-runtime 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: streaming-runtime 9 | template: 10 | metadata: 11 | labels: 12 | app: streaming-runtime 13 | spec: 14 | serviceAccountName: streaming-runtime 15 | containers: 16 | - name: cluster-stream-operator 17 | image: ghcr.io/vmware-tanzu/streaming-runtimes/streaming-runtime:0.0.4-SNAPSHOT 18 | env: 19 | - name: LOGGING_COM_VMWARE_TANZU_STREAMING_RUNTIME 20 | value: debug 21 | - name: STREAMING_RUNTIME_OPERATOR_AUTOPROVISIONCLUSTERSTREAM 22 | value: "true" 23 | - name: STREAMING_RUNTIME_OPERATOR_AUTOPROVISIONSTREAM 24 | value: "true" 25 | resources: 26 | requests: 27 | memory: "256Mi" 28 | cpu: "100m" 29 | limits: 30 | memory: "512Mi" 31 | cpu: "500m" 32 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/7-streaming-join-with-flink-sql.md: 
-------------------------------------------------------------------------------- 1 | ## 7. FSQL processor examples 2 | 3 | - [Anomaly Detection](https://vmware-tanzu.github.io/streaming-runtimes/samples/anomaly-detection/anomaly-detection/) (FSQL, SRP)- detect, in real time, suspicious credit card transactions, and extract them for further processing. 4 | - [Clickstream Analysis](https://vmware-tanzu.github.io/streaming-runtimes/samples/clickstream/clickstream/) (FSQL, SRP) - for an input clickstream stream, we want to know who are the high status customers, currently using the website so that we can engage with them or to find how much they buy or how long they stay on the site that day. 5 | - [IoT Monitoring analysis](https://vmware-tanzu.github.io/streaming-runtimes/samples/iot-monitoring/iot-monitoring/) (FSQL, SRP) - real-time analysis of IoT monitoring log. 6 | - [Streaming Music Service](https://vmware-tanzu.github.io/streaming-runtimes/samples/top-k-songs/top-k-songs/) (FSQL, SRP) - music ranking application that continuously computes the latest Top-K music charts based on song play events collected in real-time. 
-------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-schema-registry-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: s-registry 5 | labels: 6 | app: kafka 7 | component: s-registry 8 | type: streaming-spike 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: s-registry 14 | template: 15 | metadata: 16 | labels: 17 | app: s-registry 18 | component: s-registry 19 | spec: 20 | containers: 21 | - name: s-registry 22 | image: confluentinc/cp-schema-registry:5.2.5-10 23 | ports: 24 | - containerPort: 8081 25 | env: 26 | - name: SCHEMA_REGISTRY_HOST_NAME 27 | value: "s-registry" 28 | - name: SCHEMA_REGISTRY_LISTENERS 29 | value: "http://0.0.0.0:8081" 30 | - name: SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS 31 | value: "PLAINTEXT://kafka:9092" 32 | - name: SCHEMA_REGISTRY_DEBUG 33 | value: "true" 34 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-python/README.md: -------------------------------------------------------------------------------- 1 | 2 | ### Generate proto stubs 3 | 4 | * Generate the stubs for `protos/MessageService.proto`. From within the root project folder run: 5 | 6 | ```shell 7 | python3 -m pip install grpcio 8 | python3 -m pip install grpcio-tools 9 | python3 -m grpc_tools.protoc -I./protos/ --python_out=. --grpc_python_out=. MessageService.proto 10 | ``` 11 | 12 | If successful it will generate the `MessageService_pb2.py` and `MessageService_pb2_grpc.py`. 13 | 14 | ### Build container image 15 | 16 | Build docker image and push to Docker Hub. 17 | ```bash 18 | docker build -t ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-python:0.1 . 
19 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-python:0.1 20 | ``` 21 | 22 | Run the `udf-uppercase-python` image: 23 | ``` 24 | docker run -it -p50051:50051 ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-python:0.1 25 | ``` 26 | 27 | Then run the `message_service_client.py` to test the protocol: 28 | 29 | ``` 30 | python ./message_service_client.py "my test message" 31 | ``` 32 | -------------------------------------------------------------------------------- /streaming-runtime-operator/scripts/all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # After any CRDs change run this script to generate-streaming-runtime-crd.sh and to the 18 | # build-streaming-runtime-operator-installer.sh 19 | 20 | set -euo pipefail 21 | 22 | readonly PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. 
&& pwd)" 23 | 24 | bash "$PROJECT_ROOT"/scripts/generate-streaming-runtime-crd.sh 25 | bash "$PROJECT_ROOT"/scripts/build-streaming-runtime-operator-installer.sh 26 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "streaming-runtime-udf-aggregator", 3 | "version": "1.0.6", 4 | "description": "Helper library to build Tanzu Streaming Runtime UDF in JavaScript", 5 | "main": "streaming-aggregator.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [ 10 | "Streaming", 11 | "Runtime" 12 | ], 13 | "author": "Christian Tzolov", 14 | "license": "Apache-2.0", 15 | "dependencies": { 16 | "@grpc/grpc-js": "^1.6.7", 17 | "@grpc/proto-loader": "^0.6.13", 18 | "google-protobuf": "^3.20.1", 19 | "grpc-tools": "^1.11.2", 20 | "media-typer": "^1.1.0" 21 | }, 22 | "repository": { 23 | "type": "git", 24 | "url": "git+https://github.com/vmware-tanzu/streaming-runtimes.git" 25 | }, 26 | "bugs": { 27 | "url": "https://github.com/vmware-tanzu/streaming-runtimes/issues" 28 | }, 29 | "homepage": "https://github.com/vmware-tanzu/streaming-runtimes#readme", 30 | "directories": { 31 | "proto": "proto", 32 | "generated": "generated" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /sr-common/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. 
The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # under the License. 17 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.8.3/apache-maven-3.8.3-bin.zip 18 | wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.1.0/maven-wrapper-3.1.0.jar 19 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/3.2-scs-transformation.yaml: -------------------------------------------------------------------------------- 1 | # 3.2 SCS (Spring Cloud Stream) Transformation 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: time-source 6 | spec: 7 | type: SCS 8 | outputs: 9 | - name: timestamps-stream 10 | template: 11 | spec: 12 | containers: 13 | - name: scdf-time-source-kafka 14 | image: springcloudstream/time-source-kafka:3.2.0 15 | env: 16 | - name: SPRING_CLOUD_STREAM_POLLER_FIXED-DELAY 17 | value: "2000" 18 | - name: TIME_DATE-FORMAT 19 | value: "dd/MM/yyyy HH:mm:ss" 20 | --- 21 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 22 | kind: Processor 23 | metadata: 24 | name: log-sink 25 | spec: 26 | type: SCS 27 | inputs: 28 | - name: timestamps-stream 29 | template: 30 | spec: 31 | containers: 32 | - name: scdf-log-sink-kafka 33 | image: springcloudstream/log-sink-kafka:3.2.0 34 | env: 35 | - name: LOG_EXPRESSION 36 | value: "'My uppercase timestamp is: ' + 
payload" 37 | -------------------------------------------------------------------------------- /docs/_index.md_: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | Kubernetes' execution environment, designed to simplify the development and the operation of streaming data processing applications. 4 | It enables complex data processing scenarios including Time Windowing Aggregation, streaming joins as well as user-defined functions to process the streamed data. 5 | 6 | For full documentation visit [mkdocs.org](https://www.mkdocs.org). 7 | 8 | ## Documentation 9 | 10 | The [Streaming Runtime Samples](./samples) offers a good starting point to start learning how to build streaming pipeline and what are the components involved. 11 | 12 | ## Contributing 13 | 14 | The streaming-runtimes project team welcomes contributions from the community. Before you start working with streaming-runtimes, please 15 | read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be 16 | signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on 17 | as an open-source patch. For more detailed information, refer to [CONTRIBUTING.md](CONTRIBUTING.md). 18 | 19 | ## License 20 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/src/test/java/com/tanzu/streaming/runtime/udf/gaming/user/score/GamingUserScoreApplicationTests.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.udf.gaming.user.score; 17 | 18 | import org.junit.jupiter.api.Test; 19 | 20 | import org.springframework.boot.test.context.SpringBootTest; 21 | 22 | @SpringBootTest 23 | class GamingUserScoreApplicationTests { 24 | 25 | @Test 26 | void contextLoads() { 27 | } 28 | 29 | } 30 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-kowl-ui-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kafka-kowl-ui 5 | labels: 6 | app: kafka-kowl-ui 7 | component: kafka-kowl-ui 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: kafka-kowl-ui 13 | template: 14 | metadata: 15 | labels: 16 | app: kafka-kowl-ui 17 | component: kafka-kowl-ui 18 | spec: 19 | terminationGracePeriodSeconds: 15 20 | containers: 21 | - name: kafka-kowl-ui 22 | image: quay.io/cloudhut/kowl:latest 23 | ports: 24 | - containerPort: 8080 25 | env: 26 | - name: MY_NAMESPACE 27 | valueFrom: 28 | fieldRef: 29 | fieldPath: metadata.namespace 30 | - name: KAFKA_BROKERS 31 | value: kafka.$(MY_NAMESPACE).svc.cluster.local:9092 32 | - name: KAFKA_SCHEMAREGISTRY_ENABLED 33 | value: "true" 34 | - name: KAFKA_SCHEMAREGISTRY_URLS 35 | value: http://s-registry.$(MY_NAMESPACE).svc.cluster.local:8081 36 | -------------------------------------------------------------------------------- 
/srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/timestamp/ProcTimestampAssigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.srp.timestamp; 17 | 18 | import org.springframework.messaging.Message; 19 | 20 | /** 21 | * Computes a processing timestamp (e.g. the time when record is processed). 
22 | */ 23 | public class ProcTimestampAssigner implements RecordTimestampAssigner{ 24 | 25 | @Override 26 | public long extractTimestamp(Message record) { 27 | return System.currentTimeMillis(); 28 | } 29 | 30 | } 31 | 32 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | RULES.MK := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))/rules.mk 2 | include $(RULES.MK) 3 | 4 | # Include subdirs 5 | #SUBDIRS := streaming-runtime-operator multibinder-grpc sql-aggregator 6 | SUBDIRS := multibinder-grpc sql-aggregator 7 | $(foreach dir,$(SUBDIRS),$(eval $(call INCLUDE_FILE, $(dir)))) 8 | 9 | all: 10 | 11 | #operator: 12 | sql-aggregator: 13 | multibinder-grpc: 14 | 15 | #publish-streaming-operator: 16 | #publish-stream-data-generator: 17 | publish-multibinder-grpc: 18 | publish-sql-aggregator: 19 | #publish-udf-examples: 20 | 21 | tests: 22 | #streaming-runtime-operator-tests: 23 | multibinder-grpc-tests: 24 | sql-aggregator-tests: 25 | #stream-data-generator-tests: 26 | #smoke-tests: 27 | 28 | clean: 29 | 30 | # TODO: Maybe move this to the docs folder 31 | .PHONY: docs.clean docs.clean 32 | docs.build: 33 | docker build -t streaming-runtime-site:v1 --build-arg=USER=$(shell id -u) -f $(abspath $(ROOT_DIR))/docs/Dockerfile $(abspath $(ROOT_DIR)) 34 | 35 | .PHONY: docs.build docs.serve 36 | docs.serve: 37 | docker run -p 8000:8000 -v $(abspath $(ROOT_DIR))/docs:/usr/src/mkdocs/build/docs streaming-runtime-site:v1 38 | 39 | .PHONY: docs.clean 40 | docs.clean: 41 | docker rm streaming-runtime-site:v1 42 | -------------------------------------------------------------------------------- /streaming-runtime-samples/udf-samples/udf-uppercase-go/udf-uppercase-go.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "net" 7 | "strings" 8 | 9 | pb 
/*
 * Copyright 2022-2022 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.tanzu.streaming.runtime.srp.processor.window.state;

import java.util.Map;
import java.util.Set;

/**
 * Aggregated time-window state, keyed by the window's start-time timestamp.
 *
 * NOTE(review): generic type arguments appear to have been stripped from this
 * copy (raw {@code Set} / {@code Map}) — confirm against the original source.
 */
public interface State {

	/** Returns the start-time timestamps of all currently tracked windows. */
	Set keys();

	/** Records the given headers and payload under the window starting at {@code timestamp}. */
	void put(long timestamp, Map headers, byte[] payload);

	/** Returns the state entry for the window starting at {@code timestamp}. */
	StateEntry get(long timestamp);

	/**
	 * Removes the state entry for the window starting at {@code timestamp} and
	 * returns it (presumably the removed entry — confirm with implementations).
	 */
	StateEntry delete(long timestamp);
}
It defaults to built-in protocol adapters but can be configured to use operators such as RabbitOperator, Strimzi or alike instead! 21 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/5.1-partition-by-field(header-keys)-with-stateful-replication.yaml: -------------------------------------------------------------------------------- 1 | # 5.1 Partition by Field using Header Keys 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: user-partition-processor 6 | spec: 7 | type: SRP 8 | inputs: 9 | - name: data-in 10 | outputs: 11 | - name: partitioned-by-team-stream 12 | attributes: 13 | # The header name used for partitioning must match the outbound stream's spec.keys names!!! 14 | srp.output.headers: "team=payload.team" 15 | --- 16 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 17 | kind: Stream 18 | metadata: 19 | name: partitioned-by-team-stream 20 | spec: 21 | name: partitioned-by-team 22 | protocol: kafka 23 | # The 'team' is expected to be a inbound message header name!!! 
24 | keys: ["team"] 25 | partitionCount: 3 26 | --- 27 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 28 | kind: Processor 29 | metadata: 30 | name: user-scores-processor 31 | spec: 32 | type: SRP 33 | replicas: 3 34 | inputs: 35 | - name: partitioned-by-team-stream 36 | outputs: 37 | - name: team-scores 38 | attributes: 39 | srp.spel.expression: "'Team:' + #jsonPath(payload, '$.team') + ', Score:' + #jsonPath(payload, '$.score')" 40 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/OncePerClassBeforeAllCallback.java: -------------------------------------------------------------------------------- 1 | package com.vmware.tanzu.streaming.runtime; 2 | 3 | import org.junit.jupiter.api.extension.BeforeAllCallback; 4 | import org.junit.jupiter.api.extension.ExtensionContext; 5 | 6 | import static org.junit.jupiter.api.extension.ExtensionContext.Namespace; 7 | 8 | /** 9 | * The reason is the fact that JUnit 5 extension added on a test is inherited 10 | * by its @Nested tests and this is to guarantee the {@code BeforeAllCallback} 11 | * is executed ONCE by outer-most test this extension is applied on. 
12 | */ 13 | public abstract class OncePerClassBeforeAllCallback implements BeforeAllCallback { 14 | 15 | protected abstract void oncePerClassBeforeAll(ExtensionContext context) throws Exception; 16 | 17 | @Override 18 | public final void beforeAll(ExtensionContext context) throws Exception { 19 | 20 | final String executedKey = getClass().getSimpleName() + ".executed"; 21 | 22 | final ExtensionContext.Store globalStore = context.getStore(Namespace.GLOBAL); 23 | 24 | if (globalStore.get(executedKey) != null) { 25 | return; 26 | } 27 | 28 | oncePerClassBeforeAll(context); 29 | 30 | globalStore.put(executedKey, Boolean.TRUE); 31 | } 32 | 33 | } -------------------------------------------------------------------------------- /streaming-runtime-operator/scripts/generate-streaming-runtime-crd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # This script generates code for the streaming runtime CRDs 18 | # Usage: 19 | # ./scripts/generate-streaming-runtime-crd.sh 20 | 21 | set -euo pipefail 22 | 23 | readonly PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. 
&& pwd)" 24 | 25 | bash "$PROJECT_ROOT"/scripts/crds-generator.sh \ 26 | -n com.vmware.tanzu.streaming \ 27 | -p com.vmware.tanzu.streaming \ 28 | -o "$PROJECT_ROOT"/streaming-runtime \ 29 | -u "$PROJECT_ROOT"/crds/stream-crd.yaml \ 30 | -u "$PROJECT_ROOT"/crds/cluster-stream-crd.yaml \ 31 | -u "$PROJECT_ROOT"/crds/processor-crd.yaml 32 | -------------------------------------------------------------------------------- /docs/streaming-runtime-build.md: -------------------------------------------------------------------------------- 1 | 2 | # Build & Run 3 | 4 | ## Streaming Runtime Operator 5 | build instructions to build the operator, create a container image and upload it to container registry. 6 | #### CRDs 7 | 8 | Every time the CRDs under the `./crds` folder are modified make sure to runt the regnerate the models and installation. 9 | 10 | * Generate CRDs Java api and models 11 | ```shell 12 | ./scripts/generate-streaming-runtime-crd.sh 13 | ``` 14 | Generated code is under the `./streaming-runtime/src/generated/java/com/vmware/tanzu/streaming` folder 15 | 16 | * Build operator installation yaml 17 | ```shell 18 | ./scripts/build-streaming-runtime-operator-installer.sh 19 | ``` 20 | producing the `install.yaml`. 21 | 22 | The `./scripts/all.sh` combines above two steps. 23 | 24 | #### Build the operator code and image 25 | 26 | ```shell 27 | ./mvnw clean install -Dnative -DskipTests spring-boot:build-image 28 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/streaming-runtime:0.0.4-SNAPSHOT 29 | ``` 30 | (For no-native build remove the `-Dnative`). 31 | 32 | ## User Defined Functions 33 | 34 | Follow the [User Defined Function](./architecture/processors/srp/udf-overview.md) documentation to learn how to implement and build UDFs, and how to use them from within a Processor resource. 
35 | 36 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/throwaway/PlayEventsStream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: kafka-stream-playevents 5 | spec: 6 | protocol: kafka 7 | storage: 8 | clusterStream: "cluster-stream-kafka-playevents" 9 | streamMode: [ "read" ] 10 | keys: [ "song_id" ] 11 | attributes: 12 | watermark: "WATERMARK FOR `event_time` AS `event_time` - INTERVAL '30' SECONDS" 13 | payloadSchema: 14 | namespace: net.tzolov.poc.playsongs.avro 15 | type: record 16 | name: PlayEvent 17 | fields: 18 | - name: song_id 19 | type: long 20 | - name: duration 21 | type: [ "null", "long" ] # Nullable field 22 | status: 23 | conditions: 24 | - lastTransitionTime: "2022-01-15T23:48:47.028098Z" 25 | reason: StreamDeployed 26 | status: "true" 27 | type: Ready 28 | storageAddress: 29 | server: 30 | production: 31 | protocol: kafka 32 | protocolVersion: 1.0.0 33 | url: localhost:8080 34 | variables: 35 | brokers: kafka.default.svc.cluster.local:9092 36 | zkNodes: kafka-zk.default.svc.cluster.local:2181 37 | schemaRegistry: schema-registry.default.svc.cluster.local:9095 -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/protocol/REAME.md: -------------------------------------------------------------------------------- 1 | ## Install RabbitMQ Cluster and Message Topology Operators 2 | 3 | ``` 4 | kubectl apply -f "https://github.com/rabbitmq/cluster-operator/releases/latest/download/cluster-operator.yml" 5 | kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml 6 | kubectl apply -f 
https://github.com/rabbitmq/messaging-topology-operator/releases/latest/download/messaging-topology-operator-with-certmanager.yaml 7 | ``` 8 | 9 | Use the `protocolAdapterName: "rabbitmq-operator"` attribute in the `ClusterStream` or `Stream` resources: 10 | 11 | ```yaml 12 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 13 | kind: ClusterStream 14 | metadata: 15 | name: test-rabbitmq-op-clusterstream 16 | spec: 17 | name: my-exchange 18 | storage: 19 | server: 20 | ... 21 | attributes: 22 | protocolAdapterName: "rabbitmq-operator" 23 | namespace: "streaming-runtime" 24 | ``` 25 | 26 | ## Install Kafka with Strimzi Operator 27 | 28 | https://strimzi.io/quickstarts/ 29 | https://blog.jromanmartin.io/2020/12/08/connecting-apicurio-registry-secured-strimzi.html 30 | 31 | ``` 32 | kubectl create namespace kafka 33 | kubectl create -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka 34 | ``` 35 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/StreamingRuntimeApplication.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.vmware.tanzu.streaming.runtime; 17 | 18 | import org.springframework.boot.SpringApplication; 19 | import org.springframework.boot.autoconfigure.SpringBootApplication; 20 | import org.springframework.boot.context.properties.EnableConfigurationProperties; 21 | 22 | @SpringBootApplication 23 | @EnableConfigurationProperties(StreamingRuntimeProperties.class) 24 | public class StreamingRuntimeApplication { 25 | 26 | public static void main(String[] args) { 27 | SpringApplication.run(StreamingRuntimeApplication.class, args); 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/TestApiClientConfig.java: -------------------------------------------------------------------------------- 1 | package com.vmware.tanzu.streaming.runtime; 2 | 3 | import java.io.FileReader; 4 | import java.io.IOException; 5 | 6 | import io.kubernetes.client.openapi.ApiClient; 7 | import io.kubernetes.client.util.ClientBuilder; 8 | import io.kubernetes.client.util.KubeConfig; 9 | import org.junit.jupiter.api.extension.ExtensionConfigurationException; 10 | 11 | import org.springframework.beans.factory.annotation.Value; 12 | import org.springframework.context.annotation.Bean; 13 | import org.springframework.context.annotation.Configuration; 14 | import org.springframework.context.annotation.Profile; 15 | 16 | @Configuration 17 | @Profile("componenttests") 18 | public class TestApiClientConfig { 19 | 20 | @Value("${clusterName}") 21 | private String clusterName; 22 | 23 | @Bean 24 | public ApiClient apiClient() { 25 | try { 26 | LocalClusterManager.createAndWait(clusterName); 27 | return ClientBuilder 28 | .kubeconfig(KubeConfig.loadKubeConfig(new FileReader(clusterName))) 29 | .build() 30 | .setReadTimeout(0); 31 | } 32 | catch (IOException ioExc) { 33 | throw new ExtensionConfigurationException( 34 | "Failed to create ApiClient for 
const udf = require('streaming-runtime-udf-aggregator');

// --------- UDF aggregation function --------
// Counts authorization attempts per card_number within the current time
// window. `results` is the shared per-window aggregation Map, keyed by
// card_number; `headers` carries the window boundaries.
function aggregate(headers, authorization, results) {

    // Lazily create the per-card counter the first time the card is seen
    // in this window.
    if (!results.has(authorization.card_number)) {

        results.set(authorization.card_number, {
            from: headers.windowStartTime,
            to: headers.windowEndTime,
            card_number: authorization.card_number,
            count: 0,
        });
    }

    // Increment the authorization count for that card_number.
    // `count` is always initialized to the number 0 above, so the original
    // radix-less Number.parseInt round-trip is unnecessary.
    results.get(authorization.card_number).count += 1;
}

// --------- UDF release results function --------
// Called when the window closes: keeps only the aggregates with more than
// 5 authorization attempts (the suspected fraudulent cards).
function release(results) {

    const finalResults = new Map();

    results.forEach((authorizationCounter, card_number) => {
        if (authorizationCounter.count > 5) {
            finalResults.set(card_number, authorizationCounter);
        }
    });

    return finalResults;
}

new udf.Aggregator(aggregate, release).start();
37 | docker push ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-go:0.1 38 | ``` 39 | Test the container: 40 | ``` 41 | docker run -it -p55554:55554 ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-go:0.1 42 | ``` 43 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/4-stateless-replication.md: -------------------------------------------------------------------------------- 1 | ## 4. Stateless Replication 2 | 3 | By default the Processor controller deploys one instance for every Processor. 4 | The `spec.replicas` is used to set the desired number of processor instances. 5 | 6 | For non-partitioned input the runtime creates stateless Kubernetes Deployment Pods for every processor instance and configures a round-robin message delivery policy. 7 | Every inbound message is delivered to ONLY one processor instance selected on a round-robin principle. The order of the instances is not guaranteed. 8 | 9 | In case of partitioned input or the processor 'forceStatefulSet=true' attribute, the runtime operator creates StatefulSet Pods with strict guarantees about the ordering and uniqueness of these Pods. 10 | Unlike a Deployment, the StatefulSet maintains a sticky identity for each of its Pods. These pods are created from the same spec, but are not interchangeable: each has a persistent identifier that it maintains across any rescheduling. 11 | 12 | If you want to use storage volumes to provide persistence for your workload, or use Stream partitioning, then `StatefulSet` is the default configuration. Although individual Pods in a `StatefulSet` are susceptible to failure, the persistent Pod identifiers make it easier to match existing volumes to the new Pods that replace any that have failed.
13 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/5-partition-by-field-with-stateful-replication.md: -------------------------------------------------------------------------------- 1 | ## 5. Partition by Field with Stateful Replication 2 | 3 | __Processor types:__ `SRP`, `SCS` 4 | 5 | __Documentation:__ [Data Partitioning ](https://vmware-tanzu.github.io/streaming-runtimes/architecture/data-partitioning/data-partitioning/) 6 | 7 | 8 | On the Steam resource that represents the partitioned connection, use the `spec.keyExpression` to define the what header or payload field to use as a discriminator to partition the data in the steam. 9 | Additionally use the spec.partitionCount property to configure the number of partitions you would like the incoming data to be partitioned into. 10 | Those properties are used to instruct the upstream processor(s) to provision the data partitioning configuration while the downstream processors are configured for partitioned inputs (e.g. enforce instance ordering and stateful connections). 11 | 12 | If the downstream processor is scaled out (e.g. `replications: N`), then the streaming runtime will ensure `StatefulSet` replication instead of `Deployment`/`ReplicationSet`. 13 | Additionally, for the processors consuming partitioned Stream, the SR configures Pod's Ordinal Index to be used as partition instance-index. 14 | Later ensures that event after Pod failure/restart the same partitions will be (re)assigned to it. 15 | 16 | 17 | -------------------------------------------------------------------------------- /streaming-runtime-samples/online-gaming-statistics/gaming-user-score/src/main/java/com/tanzu/streaming/runtime/udf/gaming/user/score/GamingUserScoreApplicationProperties.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.udf.gaming.user.score; 17 | 18 | import org.springframework.boot.context.properties.ConfigurationProperties; 19 | 20 | @ConfigurationProperties("user.score") 21 | public class GamingUserScoreApplicationProperties { 22 | 23 | private String avroSchemaUri; 24 | 25 | public String getAvroSchemaUri() { 26 | return avroSchemaUri; 27 | } 28 | 29 | public void setAvroSchemaUri(String avroSchemaUri) { 30 | this.avroSchemaUri = avroSchemaUri; 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/kafka-ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka-ui 5 | labels: 6 | app: kafka-ui 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 80 11 | name: kafka-ui 12 | targetPort: 8080 13 | protocol: TCP 14 | 15 | selector: 16 | app: kafka-ui 17 | component: kafka-ui 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: kafka-ui 23 | labels: 24 | app: kafka-ui 25 | component: kafka-ui 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: kafka-ui 31 | template: 32 | metadata: 33 | labels: 34 | app: kafka-ui 35 | component: kafka-ui 36 | spec: 37 | terminationGracePeriodSeconds: 15 38 | containers: 39 | - name: 
package com.tanzu.streaming.runtime.srp.processor.window;

import java.time.Duration;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

/**
 * {@link Delayed} wrapper that marks an idle time-window (identified by
 * {@code timeWindowId}) as expiring {@code delay} after construction, suitable
 * for scheduling in a {@link java.util.concurrent.DelayQueue}.
 */
public class IdleWindowHolder implements Delayed {

    private final long timeWindowId;
    // Absolute wall-clock deadline (epoch millis) when this window expires.
    private final long startTime;

    public IdleWindowHolder(long timeWindowId, Duration delay) {
        this.timeWindowId = timeWindowId;
        this.startTime = System.currentTimeMillis() + delay.toMillis();
    }

    @Override
    public int compareTo(Delayed other) {
        if (other == this) {
            return 0;
        }
        if (other instanceof IdleWindowHolder) {
            // Long.compare avoids the overflow risk of subtracting two longs
            // that the original subtraction-based comparison had.
            return Long.compare(this.startTime, ((IdleWindowHolder) other).startTime);
        }
        // Fall back to the generic Delayed contract so that mixing Delayed
        // implementations in one queue no longer throws ClassCastException.
        return Long.compare(getDelay(TimeUnit.MILLISECONDS), other.getDelay(TimeUnit.MILLISECONDS));
    }

    /**
     * Clamps a long to the int range. Kept public for backward compatibility
     * with existing callers.
     */
    public static int saturatedCast(long value) {
        if (value > Integer.MAX_VALUE) {
            return Integer.MAX_VALUE;
        }
        if (value < Integer.MIN_VALUE) {
            return Integer.MIN_VALUE;
        }
        return (int) value;
    }

    @Override
    public long getDelay(TimeUnit unit) {
        // Remaining time until the deadline; negative once expired.
        long diff = startTime - System.currentTimeMillis();
        return unit.convert(diff, TimeUnit.MILLISECONDS);
    }

    public long getTimeWindowId() {
        return timeWindowId;
    }
}
-------------------------------------------------------------------------------- 1 | ## Quick start 2 | 3 | - Follow the [install](https://vmware-tanzu.github.io/streaming-runtimes/install/) instructions to install the `Streaming Runtime` operator. 4 | 5 | - Deploy a selected tutorial data pipeline. 6 | Replace the `` with one of the snippet file names from the [tutorials](https://github.com/vmware-tanzu/streaming-runtimes/tree/main/streaming-runtime-samples/tutorials) folder. 7 | ```shell 8 | kubectl apply -f 'https://raw.githubusercontent.com/vmware-tanzu/streaming-runtimes/main/streaming-runtime-samples/tutorials/.yaml' -n streaming-runtime 9 | ``` 10 | 11 | - Run the test data generator. The latter generates random data, sent to the `data-in` Kafka topic: 12 | ```shell 13 | kubectl apply -f 'https://raw.githubusercontent.com/vmware-tanzu/streaming-runtimes/main/streaming-runtime-samples/tutorials/data-generator.yaml' -n streaming-runtime 14 | ``` 15 | 16 | - Follow the [explore results](https://vmware-tanzu.github.io/streaming-runtimes/samples/instructions/#explore-the-results) instructions to see what data is generated and how it is processed through the pipeline. 17 | 18 | - To delete the data pipeline and the data generator: 19 | ```shell 20 | kubectl delete srs,srcs,srp --all -n streaming-runtime 21 | kubectl delete deployments,svc -l app=tutorial-data-generator -n streaming-runtime 22 | ``` 23 | -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/processor/EventTimeProcessor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.srp.processor; 17 | 18 | import com.tanzu.streaming.runtime.srp.watermark.WatermarkService; 19 | 20 | import org.springframework.messaging.Message; 21 | 22 | /** 23 | * Implemented by event-time aware processors. Implementations can be stateful (e.g. time-window aggregations) or 24 | * stateless (e.g. source watermark generators or mapping processors). 25 | */ 26 | public interface EventTimeProcessor { 27 | /** 28 | * Handles a new time-event message. 29 | * @param message 30 | */ 31 | void onNewMessage(Message message); 32 | 33 | WatermarkService getWatermarkService(); 34 | } 35 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/5-partition-by-field-with-stateful-replication.yaml: -------------------------------------------------------------------------------- 1 | # 5. 
Partition by Field with Stateful Replication 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Stream 4 | metadata: 5 | name: data-in-stream 6 | spec: 7 | name: data-in 8 | protocol: kafka 9 | --- 10 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 11 | kind: Processor 12 | metadata: 13 | name: team-partition-processor 14 | spec: 15 | type: SRP 16 | inputs: 17 | - name: data-in-stream 18 | outputs: 19 | - name: partitioned-by-team-stream 20 | --- 21 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 22 | kind: Stream 23 | metadata: 24 | name: partitioned-by-team-stream 25 | spec: 26 | name: partitioned-by-team 27 | protocol: kafka 28 | keyExpression: "payload.team" 29 | partitionCount: 3 30 | --- 31 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 32 | kind: Processor 33 | metadata: 34 | name: team-scores-processor 35 | spec: 36 | type: SRP 37 | replicas: 3 38 | inputs: 39 | - name: partitioned-by-team-stream 40 | outputs: 41 | - name: team-scores-stream 42 | attributes: 43 | srp.spel.expression: "'Team:' + #jsonPath(payload, '$.team') + ', Score:' + #jsonPath(payload, '$.score')" 44 | --- 45 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 46 | kind: Stream 47 | metadata: 48 | name: team-scores-stream 49 | spec: 50 | name: team-scores 51 | protocol: kafka 52 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/dataschema/AvroHelper.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.vmware.tanzu.streaming.runtime.dataschema; 17 | 18 | import com.fasterxml.jackson.databind.ObjectMapper; 19 | import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; 20 | 21 | public class AvroHelper { 22 | 23 | public static String convertYamlOrJsonToJson(String yaml) { 24 | try { 25 | ObjectMapper yamlReader = new ObjectMapper(new YAMLFactory()); 26 | Object obj = yamlReader.readValue(yaml, Object.class); 27 | 28 | ObjectMapper jsonWriter = new ObjectMapper(); 29 | return jsonWriter.writeValueAsString(obj); 30 | } 31 | catch (Exception e) { 32 | throw new RuntimeException(e); 33 | } 34 | } 35 | 36 | } 37 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/5.2-partition-by-field-with-stateful-replication(rabbitmq).yaml: -------------------------------------------------------------------------------- 1 | # 5.2 Partition by Field - RabbitMQ version 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Stream 4 | metadata: 5 | name: data-in-stream 6 | spec: 7 | name: data-in 8 | protocol: kafka 9 | --- 10 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 11 | kind: Processor 12 | metadata: 13 | name: user-partition-processor 14 | spec: 15 | type: SRP 16 | inputs: 17 | - name: data-in-stream 18 | outputs: 19 | - name: partitioned-by-team-stream 20 | --- 21 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 22 | kind: Stream 23 | metadata: 24 | name: partitioned-by-team-stream 25 | spec: 26 | name: partitioned-by-team 27 | 
protocol: rabbitmq 28 | keyExpression: "payload.team" 29 | partitionCount: 3 30 | --- 31 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 32 | kind: Processor 33 | metadata: 34 | name: user-scores-processor 35 | spec: 36 | type: SRP 37 | replicas: 3 38 | inputs: 39 | - name: partitioned-by-team-stream 40 | outputs: 41 | - name: team-scores-stream 42 | attributes: 43 | srp.spel.expression: '''Team:'' + #jsonPath(payload, ''$.team'') + '', Score:'' + #jsonPath(payload, ''$.score'')' 44 | --- 45 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 46 | kind: Stream 47 | metadata: 48 | name: team-scores-stream 49 | spec: 50 | name: team-scores 51 | protocol: rabbitmq 52 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/src/main/java/com/tanzu/streaming/runtime/udf/aggregator/IdentityReleaser.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.udf.aggregator; 17 | 18 | import java.util.concurrent.ConcurrentHashMap; 19 | 20 | import org.springframework.messaging.MessageHeaders; 21 | 22 | /** 23 | * Simple identity implementation of the Releaser interface that just echoes the inputs. 
24 | */ 25 | public class IdentityReleaser implements Releaser { 26 | 27 | @Override 28 | public ConcurrentHashMap release(ConcurrentHashMap outputAggregatedState) { 29 | return outputAggregatedState; 30 | } 31 | 32 | @Override 33 | public MessageHeaders headers(MessageHeaders outputHeaders) { 34 | return outputHeaders; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/kafka-kowl-ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka-kowl-ui 5 | labels: 6 | app: kafka-kowl-ui 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 80 11 | name: kafka-kowl-ui 12 | targetPort: 8080 13 | protocol: TCP 14 | 15 | selector: 16 | app: kafka-kowl-ui 17 | component: kafka-kowl-ui 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: kafka-kowl-ui 23 | labels: 24 | app: kafka-kowl-ui 25 | component: kafka-kowl-ui 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: kafka-kowl-ui 31 | template: 32 | metadata: 33 | labels: 34 | app: kafka-kowl-ui 35 | component: kafka-kowl-ui 36 | spec: 37 | terminationGracePeriodSeconds: 15 38 | containers: 39 | - name: kafka-kowl-ui 40 | image: quay.io/cloudhut/kowl:latest 41 | ports: 42 | - containerPort: 8080 43 | env: 44 | - name: MY_NAMESPACE 45 | valueFrom: 46 | fieldRef: 47 | fieldPath: metadata.namespace 48 | - name: KAFKA_BROKERS 49 | value: kafka.$(MY_NAMESPACE).svc.cluster.local:9092 50 | - name: KAFKA_SCHEMAREGISTRY_ENABLED 51 | value: "true" 52 | - name: KAFKA_SCHEMAREGISTRY_URLS 53 | value: http://s-registry.$(MY_NAMESPACE).svc.cluster.local:8081 54 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/rabbitmq-op/rabbitmq-cluster.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: my-namespace 5 | --- 6 | apiVersion: rabbitmq.com/v1beta1 7 | kind: RabbitmqCluster 8 | metadata: 9 | name: my-rabbitmq-cluster 10 | namespace: my-namespace 11 | --- 12 | apiVersion: rabbitmq.com/v1beta1 13 | kind: Queue 14 | metadata: 15 | name: test # name of this custom resource; does not have to the same as the actual queue name 16 | namespace: my-namespace 17 | spec: 18 | name: test-queue # name of the queue 19 | rabbitmqClusterReference: 20 | name: my-rabbitmq-cluster 21 | namespace: my-namespace 22 | --- 23 | apiVersion: rabbitmq.com/v1beta1 24 | kind: Exchange 25 | metadata: 26 | name: fanout 27 | namespace: my-namespace 28 | spec: 29 | name: fanout-exchange # name of the exchange 30 | type: fanout # default to 'direct' if not provided; can be set to 'direct', 'fanout', 'headers', and 'topic' 31 | autoDelete: false 32 | durable: true 33 | rabbitmqClusterReference: 34 | name: my-rabbitmq-cluster 35 | namespace: my-namespace 36 | --- 37 | apiVersion: rabbitmq.com/v1beta1 38 | kind: Binding 39 | metadata: 40 | name: binding 41 | namespace: my-namespace 42 | spec: 43 | source: fanout-exchange # an existing exchange 44 | destination: test-queue # an existing queue 45 | destinationType: queue # can be 'queue' or 'exchange' 46 | rabbitmqClusterReference: 47 | name: my-rabbitmq-cluster 48 | namespace: my-namespace -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/throwaway/SongStream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: kafka-stream-song 5 | spec: 6 | protocol: "kafka" 7 | storage: 8 | clusterStream: "cluster-stream-kafka-song" 9 | streamMode: [ "read" ] 10 | 
keys: [ "album", "genre" ] 11 | attributes: 12 | watermark: "`proctime` AS PROCTIME()" 13 | payloadSchema: 14 | namespace: net.tzolov.poc.playsongs.avro 15 | type: record 16 | name: Song 17 | fields: 18 | - name: id 19 | type: long 20 | - name: name 21 | type: string 22 | - name: album 23 | type: [ "null", "string" ] # Nullable field 24 | - name: artist 25 | type: [ "null", "string" ] # Nullable field 26 | - name: genre 27 | type: string 28 | - name: at 29 | type: 30 | type: long 31 | logicalType: timestamp-millis 32 | status: 33 | conditions: 34 | - lastTransitionTime: "2022-01-15T23:48:47.028098Z" 35 | reason: StreamDeployed 36 | status: "true" 37 | type: Ready 38 | storageAddress: 39 | server: 40 | production: 41 | protocol: kafka 42 | protocolVersion: 1.0.0 43 | url: localhost:8080 44 | variables: 45 | brokers: kafka.default.svc.cluster.local:9092 46 | zkNodes: kafka-zk.default.svc.cluster.local:2181 47 | schemaRegistry: schema-registry.default.svc.cluster.local:9095 -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/rabbitmq-op/RABBITMQ-CLUSTERSTREAM-AUTOPROVISION-TEMPLATE.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: @@namespace@@ 5 | --- 6 | apiVersion: rabbitmq.com/v1beta1 7 | kind: RabbitmqCluster 8 | metadata: 9 | name: @@cluster-name@@ 10 | namespace: @@namespace@@ 11 | --- 12 | apiVersion: rabbitmq.com/v1beta1 13 | kind: Queue 14 | metadata: 15 | name: @@queue-name@@ 16 | namespace: @@namespace@@ 17 | spec: 18 | name: @@queue-name@@ # name of the queue 19 | rabbitmqClusterReference: 20 | name: @@cluster-name@@ 21 | namespace: @@namespace@@ 22 | --- 23 | apiVersion: rabbitmq.com/v1beta1 24 | kind: Exchange 25 | metadata: 26 | name: @@exchange-name@@ 27 | namespace: @@namespace@@ 28 | spec: 29 | name: @@exchange-name@@ # name of the exchange 
30 | type: fanout # default to 'direct' if not provided; can be set to 'direct', 'fanout', 'headers', and 'topic' 31 | autoDelete: false 32 | durable: true 33 | rabbitmqClusterReference: 34 | name: @@cluster-name@@ 35 | namespace: @@namespace@@ 36 | --- 37 | apiVersion: rabbitmq.com/v1beta1 38 | kind: Binding 39 | metadata: 40 | name: binding-@@exchange-name@@-@@queue-name@@ 41 | namespace: @@namespace@@ 42 | spec: 43 | source: @@exchange-name@@ # an existing exchange 44 | destination: @@queue-name@@ # an existing queue 45 | destinationType: queue # can be 'queue' or 'exchange' 46 | rabbitmqClusterReference: 47 | name: @@cluster-name@@ 48 | namespace: @@namespace@@ -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/processor/window/state/StateEntry.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2002-2020 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.tanzu.streaming.runtime.srp.processor.window.state; 17 | 18 | import java.util.Collection; 19 | import java.util.Map; 20 | 21 | public class StateEntry { 22 | 23 | private final long timestamp; 24 | private final Map headers; 25 | private final Collection payloads; 26 | 27 | public StateEntry(long timestamp, Map headers, Collection payloads) { 28 | this.timestamp = timestamp; 29 | this.headers = headers; 30 | this.payloads = payloads; 31 | } 32 | 33 | public long getTimestamp() { 34 | return timestamp; 35 | } 36 | 37 | public Collection getPayloads() { 38 | return payloads; 39 | } 40 | 41 | public Map getHeaders() { 42 | return headers; 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /streaming-runtime-operator/manifests/streaming-runtime-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: streaming-runtime-cluster-role 5 | rules: 6 | - apiGroups: 7 | - streaming.tanzu.vmware.com 8 | resources: 9 | - clusterstreams 10 | - streams 11 | - processors 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - patch 17 | - create 18 | - apiGroups: 19 | - streaming.tanzu.vmware.com 20 | resources: 21 | - clusterstreams/status 22 | - streams/status 23 | - processors/status 24 | verbs: 25 | - patch 26 | - get 27 | - list 28 | - watch 29 | - apiGroups: 30 | - "events.k8s.io" 31 | resources: 32 | - events 33 | verbs: 34 | - create 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - services 39 | - pods 40 | - configmaps 41 | - secrets 42 | verbs: 43 | - get 44 | - list 45 | - watch 46 | - patch 47 | - create 48 | - apiGroups: 49 | - apps 50 | resources: 51 | - deployments 52 | - statefulsets 53 | - statefulsets/scale 54 | verbs: 55 | - get 56 | - list 57 | - create 58 | - patch 59 | - watch 60 | - update 61 | - apiGroups: 62 | - rabbitmq.com 63 | resources: 64 | - 
rabbitmqclusters 65 | - queues 66 | - exchanges 67 | - bindings 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - patch 73 | - create -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/kafka-akhg-ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kafka-akhg-ui 5 | labels: 6 | app: kafka-akhg-ui 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 80 11 | name: kafka-akhg-ui 12 | targetPort: 8080 13 | protocol: TCP 14 | 15 | selector: 16 | app: kafka-akhg-ui 17 | component: kafka-akhg-ui 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: kafka-akhg-ui 23 | labels: 24 | app: kafka-akhg-ui 25 | component: kafka-akhg-ui 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: kafka-akhg-ui 31 | template: 32 | metadata: 33 | labels: 34 | app: kafka-akhg-ui 35 | component: kafka-akhg-ui 36 | spec: 37 | terminationGracePeriodSeconds: 15 38 | containers: 39 | - name: kafka-akhg-ui 40 | image: tchiotludo/akhq:latest 41 | ports: 42 | - containerPort: 8080 43 | env: 44 | - name: MY_NAMESPACE 45 | valueFrom: 46 | fieldRef: 47 | fieldPath: metadata.namespace 48 | - name: AKHQ_CONFIGURATION 49 | value: | 50 | akhq: 51 | connections: 52 | docker-kafka-server: 53 | properties: 54 | bootstrap.servers: "kafka.$(MY_NAMESPACE).svc.cluster.local:9092" 55 | schema-registry: 56 | url: "http://s-registry.$(MY_NAMESPACE).svc.cluster.local:8081" 57 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/ProcessorStatusException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.vmware.tanzu.streaming.runtime; 17 | 18 | import com.vmware.tanzu.streaming.models.V1alpha1Processor; 19 | 20 | public class ProcessorStatusException extends Exception { 21 | 22 | private final V1alpha1Processor processor; 23 | private final String status; 24 | private final String reason; 25 | 26 | public ProcessorStatusException(V1alpha1Processor processor, String status, String reason, String message) { 27 | super(message); 28 | this.processor = processor; 29 | this.status = status; 30 | this.reason = reason; 31 | } 32 | 33 | public String getReason() { 34 | return reason; 35 | } 36 | 37 | public V1alpha1Processor getProcessor() { 38 | return processor; 39 | } 40 | 41 | public String getStatus() { 42 | return status; 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/resources/manifests/protocol/kafka/kafka-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kafka-broker 5 | labels: 6 | app: kafka 7 | type: streaming-spike 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: kafka 13 | template: 14 | metadata: 15 | labels: 16 | app: kafka 17 | component: kafka-broker 18 | spec: 19 | containers: 20 | - name: kafka 21 | 
image: wurstmeister/kafka:2.12-2.5.0 22 | ports: 23 | - containerPort: 9094 24 | env: 25 | - name: MY_KAFKA_POD_IP 26 | valueFrom: 27 | fieldRef: 28 | fieldPath: status.podIP 29 | - name: ENABLE_AUTO_EXTEND 30 | value: "true" 31 | - name: KAFKA_RESERVED_BROKER_MAX_ID 32 | value: "999999999" 33 | - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE 34 | value: "true" 35 | - name: KAFKA_LISTENERS 36 | value: "PLAINTEXT://:9092,OUTSIDE://:9094" 37 | - name: KAFKA_ADVERTISED_LISTENERS 38 | value: "PLAINTEXT://$(MY_KAFKA_POD_IP):9092,OUTSIDE://localhost:9094" 39 | - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP 40 | value: "PLAINTEXT:PLAINTEXT,OUTSIDE:PLAINTEXT" 41 | - name: KAFKA_INTER_BROKER_LISTENER_NAME 42 | value: "PLAINTEXT" 43 | - name: KAFKA_PORT 44 | value: "9092" 45 | - name: KAFKA_ZOOKEEPER_CONNECT 46 | value: kafka-zk:2181 -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/timestamp/MessageHeaderTimestampAssigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.tanzu.streaming.runtime.srp.timestamp; 17 | 18 | import org.springframework.messaging.Message; 19 | import org.springframework.util.Assert; 20 | 21 | /** 22 | * Lookups the timestamp from pre-configured message's header. 23 | */ 24 | public class MessageHeaderTimestampAssigner implements RecordTimestampAssigner { 25 | 26 | private final String headerName; 27 | 28 | public MessageHeaderTimestampAssigner(String headerName) { 29 | Assert.hasText(headerName, "Header name can not be empty!"); 30 | this.headerName = headerName; 31 | } 32 | 33 | @Override 34 | public long extractTimestamp(Message message) { 35 | if (!message.getHeaders().containsKey(this.headerName)) { 36 | return RecordTimestampAssigner.NO_TIMESTAMP; 37 | } 38 | return message.getHeaders().get(this.headerName, Long.class); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /docs/architecture/processors/overview.md: -------------------------------------------------------------------------------- 1 | # Processors 2 | 3 | The `Processor` represents an independent event-driven streaming application that can consume one or more input Streams, transform the received data and send the results downstream over one or more output Streams. 4 | 5 | For a detailed description of attributes of the resource please read [processor-crd.yaml](https://github.com/vmware-tanzu/streaming-runtimes/blob/main/streaming-runtime-operator/crds/processor-crd.yaml){:target="_blank"} 6 | 7 | 8 | ![Multi In/Out Processor](../../sr-multi-in-out-processor.svg) 9 | 10 | The Streaming Runtime provides a built-in, general purpose Processor of type [SRP](srp/overview.md) and two additional processor types to provide integration with 3rd party streaming technologies, such as Apache Flink (type: [FSQL](fsql/overview.md)) and Spring Cloud Stream/Spring Cloud Function (type: [SCS](scs/overview.md)). 11 | Processors from all types can be combined and used interchangeably. 
12 | 13 | The Streaming Runtime allows implementing additional Processor types that can provide integration with other streaming systems such as Apache Spark, KSQL and alike. 14 | 15 | ## Processor types 16 | 17 | - [SRP](srp/overview.md): Streaming Runtime Processor. Processor built-in the Streaming Runtime, that allow various streaming transformation, 18 | such as message brokers bridging, custom user-defined functions in the language of choice and simple tumbling time-window aggregation. 19 | - [FSQL](fsql/overview.md): Backed by Apache Flink SQL Streaming. Allow inline streaming SQL queries definition. 20 | - [SCS](scs/overview.md): Runs Spring Cloud Stream applications as processors in the pipeline. 21 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/StreamingRuntimeProperties.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package com.vmware.tanzu.streaming.runtime; 18 | 19 | import org.springframework.boot.context.properties.ConfigurationProperties; 20 | 21 | @ConfigurationProperties("streaming.runtime.operator") 22 | public class StreamingRuntimeProperties { 23 | 24 | private boolean autoProvisionClusterStream = true; 25 | 26 | private boolean autoProvisionStream = true; 27 | 28 | public boolean isAutoProvisionClusterStream() { 29 | return this.autoProvisionClusterStream; 30 | } 31 | 32 | public void setAutoProvisionClusterStream(boolean autoProvisionClusterStream) { 33 | this.autoProvisionClusterStream = autoProvisionClusterStream; 34 | } 35 | 36 | public void setAutoProvisionStream(boolean autoProvisionStream) { 37 | this.autoProvisionStream = autoProvisionStream; 38 | } 39 | 40 | public boolean isAutoProvisionStream() { 41 | return this.autoProvisionStream; 42 | } 43 | 44 | } 45 | -------------------------------------------------------------------------------- /material/overrides/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | 4 | {% block extrahead %} 5 | 6 | 7 | {% set title = config.site_name %} 8 | {% if page and page.title and not page.is_homepage %} 9 | {% set title = config.site_name ~ " - " ~ page.title | striptags %} 10 | {% endif %} 11 | 12 | 13 | {% set image = config.site_url ~ 'assets/images/illustrations/banner.png' %} 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 33 | {% endblock %} 34 | 35 | 36 | {% block announce %} 37 |
38 |

The Streaming Runtimes Operator is archived. 39 | Learn more. 40 |

41 |
42 | 43 | {% endblock %} -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/src/main/java/com/tanzu/streaming/runtime/udf/aggregator/PayloadConverter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.udf.aggregator; 17 | 18 | import org.springframework.util.MimeType; 19 | 20 | /** 21 | * Converts from bytes payloads to a record (e.g. avro or JSON map) and from record to byte array payload. 22 | */ 23 | public interface PayloadConverter { 24 | /** 25 | * Convert byte array payload into a record. 26 | * @param payload byte array payload to convert 27 | * @param payloadContentType input payload content type. 28 | * @return Returns a 29 | */ 30 | T fromPayload(byte[] payload, MimeType payloadContentType); 31 | 32 | /** 33 | * Covert from the domain type to wire byte array payload. 34 | * @param record domain type record. 35 | * @param targetContentType target content type. 36 | * @return Returns byte array encoded content in accordance with the target content type. 
37 | */ 38 | byte[] toPayload(T record, MimeType targetContentType); 39 | } 40 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/throwaway/TopKSongsPerGenreStream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: kafka-stream-topksongspergenre 5 | spec: 6 | protocol: kafka 7 | storage: 8 | clusterStream: cluster-stream-kafka-topksongspergenre 9 | streamMode: [ "read" ] 10 | keys: [ "song_id" ] 11 | attributes: 12 | ddlPrimaryKey: "PRIMARY KEY (`window_start`, `window_end`, `song_id`, `genre`) NOT ENFORCED" 13 | ddlConnector: "upsert-kafka" 14 | ddlWithExtension: "'properties.allow.auto.create.topics' = 'true'" 15 | ddlValueFormat: "json" 16 | payloadSchema: 17 | namespace: net.tzolov.poc.playsongs.avro 18 | type: record 19 | name: TopKSongsPerGenre 20 | fields: 21 | - name: window_start 22 | type: 23 | type: long 24 | logicalType: timestamp-millis 25 | - name: window_end 26 | type: 27 | type: long 28 | logicalType: timestamp-millis 29 | - name: song_id 30 | type: long 31 | - name: name 32 | type: string 33 | - name: genre 34 | type: string 35 | - name: song_play_count 36 | type: [ "null", "long" ] # Nullable field 37 | status: 38 | conditions: 39 | - lastTransitionTime: "2022-01-15T23:48:47.028098Z" 40 | reason: StreamDeployed 41 | status: "true" 42 | type: Ready 43 | storageAddress: 44 | server: 45 | production: 46 | protocol: kafka 47 | protocolVersion: 1.0.0 48 | url: localhost:8080 49 | variables: 50 | brokers: kafka.default.svc.cluster.local:9092 51 | zkNodes: kafka-zk.default.svc.cluster.local:2181 52 | -------------------------------------------------------------------------------- 
/streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/throwaway/SongPlaysStream.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: kafka-stream-songplays 5 | spec: 6 | protocol: "kafka" 7 | storage: 8 | clusterStream: "cluster-stream-kafka-songplays" 9 | streamMode: [ "read", "write"] 10 | keys: [ "name", "genre" ] 11 | attributes: 12 | watermark: "WATERMARK FOR `event_time` AS `event_time` - INTERVAL '1' SECOND" 13 | ddlWithExtension1: "'key.fields' = 'song_id'" 14 | ddlValueFormat: "json" 15 | payloadSchema: 16 | namespace: net.tzolov.poc.playsongs.avro 17 | type: record 18 | name: SongPlays 19 | fields: 20 | - name: song_id 21 | type: long 22 | - name: name 23 | type: string 24 | - name: album 25 | type: [ "null", "string" ] # Nullable field 26 | - name: artist 27 | type: [ "null", "string" ] # Nullable field 28 | - name: genre 29 | type: string 30 | - name: duration 31 | type: [ "null", "long" ] # Nullable field 32 | - name: event_time 33 | type: 34 | type: long 35 | logicalType: timestamp-millis 36 | status: 37 | conditions: 38 | - lastTransitionTime: "2022-01-15T23:48:47.028098Z" 39 | reason: StreamDeployed 40 | status: "true" 41 | type: Ready 42 | storageAddress: 43 | server: 44 | production: 45 | protocol: kafka 46 | protocolVersion: 1.0.0 47 | url: localhost:8080 48 | variables: 49 | brokers: kafka.default.svc.cluster.local:9092 50 | zkNodes: kafka-zk.default.svc.cluster.local:2181 51 | schemaRegistry: schema-registry.default.svc.cluster.local:9095 -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/MyComponentTest.java: -------------------------------------------------------------------------------- 1 | package com.vmware.tanzu.streaming.runtime; 2 | 3 | import 
io.kubernetes.client.openapi.ApiException; 4 | import org.junit.jupiter.api.BeforeAll; 5 | import org.junit.jupiter.api.Test; 6 | 7 | import org.springframework.boot.test.context.SpringBootTest; 8 | import org.springframework.test.context.ActiveProfiles; 9 | 10 | import static java.util.UUID.randomUUID; 11 | import static org.awaitility.Awaitility.await; 12 | 13 | @KubernetesComponentTest 14 | @SpringBootTest(properties = "clusterName=createcomponenttest") 15 | @ActiveProfiles("componenttests") 16 | public class MyComponentTest { 17 | 18 | private TestK8SClient testK8SClient; 19 | 20 | private String createNamespace; 21 | 22 | @BeforeAll 23 | void setupCreateScenario() throws Exception { 24 | this.createNamespace = "create-component-test-" + randomUUID().toString().substring(0, 8); 25 | //createScenario = new ClusterConfigurationSourceScenario() 26 | // .withCreateSlice().acsNamespace(createNamespace) 27 | // .run(testK8SClient); 28 | } 29 | 30 | @Test 31 | void shouldCreateConfigMap() throws ApiException { 32 | System.out.println("Boza"); 33 | //await(format("configMap %s/%s", createNamespace, createScenario.getConfigMapRevisionNames().peek())) 34 | // .atMost(Duration.ofMinutes(1)) 35 | // .pollInterval(Duration.ofSeconds(1)) 36 | // .until(() -> testK8SClient.configMapExists( 37 | // createScenario.getConfigMapRevisionNames().peek(), 38 | // createNamespace)); 39 | // 40 | //assertThat(testK8SClient.configMapData( 41 | // createScenario.getConfigMapRevisionNames().peek(), 42 | // createNamespace)) 43 | // .containsEntry("cook.special", "Cake a la mode"); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/timestamp/RecordTimestampAssigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.tanzu.streaming.runtime.srp.timestamp; 18 | 19 | import org.springframework.messaging.Message; 20 | 21 | /** 22 | * Extracts the Event (or Processing) time to be assigned with the processed record. The timestamps can be extracted 23 | * from existing header or payload fields or computed from the message. The 'proc' is reserved and stands from 24 | * processing-timestamp. 25 | */ 26 | public interface RecordTimestampAssigner { 27 | /** 28 | * The value that is passed to {@link #extractTimestamp} when there is no previous timestamp attached to the record. 29 | */ 30 | long NO_TIMESTAMP = Long.MIN_VALUE; 31 | 32 | /** 33 | * Computes and assigns a timestamp for the record. Value is in milliseconds since the Epoch. This is independent of 34 | * any particular time zone or calendar. 35 | * 36 | * @param record The element that the timestamp will be assigned to. 37 | * @return The new timestamp. 38 | */ 39 | long extractTimestamp(Message record); 40 | } 41 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/src/main/java/com/tanzu/streaming/runtime/udf/aggregator/Aggregate.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.udf.aggregator; 17 | 18 | import java.util.concurrent.ConcurrentHashMap; 19 | 20 | import org.springframework.messaging.MessageHeaders; 21 | 22 | /** 23 | * Primary hook to implement the user defined function (UDF). When receiving a new Time-Window aggregate of T records, 24 | * the Aggregator calls the aggregate method, sequentially, for every input T record. That way the aggregate 25 | * implementation can compute a new aggregate state to be sent downstream. 26 | */ 27 | public interface Aggregate { 28 | /** 29 | * Method called for each element on the Time-Windowed aggregation. 30 | * @param headers - Headers for the (multipart) TWA input. 31 | * @param twaRecord Record from the aggregated TWA aggregation. 32 | * @param outputAggregatedState Computed output state. Every separate map entry would result in separate output 33 | * message. 34 | */ 35 | void aggregate(MessageHeaders headers, T twaRecord, ConcurrentHashMap outputAggregatedState); 36 | } 37 | -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/timestamp/DefaultEventHeaderOrProcTimestampAssigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.srp.timestamp; 17 | 18 | import org.springframework.messaging.Message; 19 | 20 | /** 21 | * If the massage contains an 'eventtime' header, delegate to the MessageHeaderTimestampAssigner otherwise fallback to 22 | * the ProcTimestampAssigner. 23 | */ 24 | public class DefaultEventHeaderOrProcTimestampAssigner implements RecordTimestampAssigner { 25 | 26 | private static final String EVENTTIME_HEADER = "eventtime"; 27 | 28 | private ProcTimestampAssigner procTimestampAssigner = new ProcTimestampAssigner(); 29 | 30 | private MessageHeaderTimestampAssigner messageHeaderTimestampAssigner = new MessageHeaderTimestampAssigner( 31 | EVENTTIME_HEADER); 32 | 33 | @Override 34 | public long extractTimestamp(Message record) { 35 | if (record.getHeaders().containsKey(EVENTTIME_HEADER)) { 36 | return this.messageHeaderTimestampAssigner.extractTimestamp(record); 37 | } 38 | return this.procTimestampAssigner.extractTimestamp(record); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /docs/sr-technical-stack.md: -------------------------------------------------------------------------------- 1 | 2 | ## Streaming-Lite 3 | 4 | For applications which rely on time-ordered delivery of events for a single key, but which do not perform any cross-key operations like joins or repartitioning. 
5 | These applications include windowing in some cases but not others. 6 | Typically, streaming-lite applications run as pipelined workloads in an existing scheduler framework such as Kubernetes. 7 | Examples include ETL applications and time-series analysis. 8 | 9 | The Streaming Runtime [SRP](./architecture/processors/srp/overview.md) and [SCS](./architecture/processors/scs/overview.md) processor types are provided to support the Streaming Light use case. The built-in, general purpose SRP processor is capable of data partitioning, scaling, time-window aggregation and polyglot UDF functions. The SCS processor provide drop-in support for the large Spring Cloud Stream / Spring Cloud Function ecosystems. 10 | 11 | ## Streaming-Expert 12 | 13 | For applications which perform cross-stream analysis using joins and/or repartitioning, or applications which may perform dynamic stream operations (for example, windowing based on data conditions rather than time or rows). These applications typically using a pool of worker nodes, where the code and data is distributed to nodes using a framework-specific technique. Examples include Apache Spark, Apache Beam, and Apache Flink. 14 | 15 | Currently the Streaming Runtime offers a [FSQL](./architecture/processors/fsql/overview.md) processor type that can run fully-fledged Apache Flink Streaming SQL queries. (in embedded mode only). 16 | 17 | ## Implementation Stack 18 | 19 | The Streaming Runtime implementation stack looks like this: 20 | 21 | ![](./assets/images/sr-tech-stack6.svg) 22 | 23 | 24 | TODO: Provide [CRC documentation](http://agilemodeling.com/artifacts/crcModel.htm) for the implementation stack. 
-------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/test/java/com/vmware/tanzu/streaming/runtime/TestK8SClient.java: -------------------------------------------------------------------------------- 1 | package com.vmware.tanzu.streaming.runtime; 2 | 3 | import java.io.IOException; 4 | 5 | import io.kubernetes.client.openapi.ApiClient; 6 | import io.kubernetes.client.openapi.ApiException; 7 | import io.kubernetes.client.openapi.apis.ApiextensionsV1Api; 8 | import io.kubernetes.client.openapi.apis.AppsV1Api; 9 | import io.kubernetes.client.openapi.apis.CoreV1Api; 10 | import io.kubernetes.client.openapi.models.V1CustomResourceDefinition; 11 | import org.yaml.snakeyaml.Yaml; 12 | 13 | import org.springframework.core.io.DefaultResourceLoader; 14 | 15 | import static org.awaitility.Awaitility.await; 16 | 17 | public class TestK8SClient { 18 | private static final String CLUSTER_STREAM_CRD_URI = "file:/Users/ctzolov/Dev/projects/tanzu/streaming-runtime-parent/crds/cluster-stream-crd.yaml"; 19 | private final AppsV1Api appsV1Api; 20 | private final ApiextensionsV1Api apiextensionsV1Api; 21 | private final CoreV1Api coreV1Api; 22 | private final V1CustomResourceDefinition clusterStreamCrdDefinition; 23 | 24 | public TestK8SClient(ApiClient apiClient) throws IOException { 25 | appsV1Api = new AppsV1Api(apiClient); 26 | apiextensionsV1Api = new ApiextensionsV1Api(apiClient); 27 | coreV1Api = new CoreV1Api(apiClient); 28 | Yaml yaml = new Yaml(); 29 | clusterStreamCrdDefinition = yaml.loadAs( 30 | new DefaultResourceLoader().getResource(CLUSTER_STREAM_CRD_URI).getInputStream(), 31 | V1CustomResourceDefinition.class); 32 | ; 33 | } 34 | 35 | public void createCrd() throws ApiException { 36 | apiextensionsV1Api.createCustomResourceDefinition(clusterStreamCrdDefinition, null, null, null); 37 | await("CLUSTER STREAM CRD") 38 | .until(() -> apiextensionsV1Api.readCustomResourceDefinition( 39 | 
ifndef RULES_MK # Prevent repeated "-include".
RULES_MK := $(lastword $(MAKEFILE_LIST))
RULES_INCLUDE_DIR := $(dir $(RULES_MK))
ROOT_DIR := $(RULES_INCLUDE_DIR)

.DEFAULT_GOAL := all
.DELETE_ON_ERROR: # This will delete files from targets that don't succeed.
.SUFFIXES: # This removes a lot of the implicit rules.

os.name := $(shell uname -s | tr A-Z a-z)

out_dir := $(abspath $(ROOT_DIR)/out)
build_dir := $(abspath $(ROOT_DIR)/build)

time_format := epoch
build_time.human := $(shell date +'%Y%m%d-%H%M%S')
build_time.epoch := $(shell date +'%s')
build_time = $(build_time.$(time_format))

# Git state captured once at parse time.
git.dirty := $(shell git status -s)
git.branch := $(shell git rev-parse --abbrev-ref HEAD)
git.commit := $(shell git rev-parse HEAD)

registry.location := ghcr
registry.ghcr := ghcr.io/vmware-tanzu/streaming-runtimes
registry.other := $(REGISTRY)
registry = $(registry.$(registry.location))

ifeq ($(registry.location), other)
ifndef REGISTRY
$(error REGISTRY not defined. This is required for targetting "other" registry)
endif
endif

# Classify the build from the branch: release/* -> release, main -> latest,
# anything else -> commit.
build := commit
ifneq (,$(findstring release/, $(git.branch)))
build := release
# BUG FIX: was "ifeq ($(GIT_BRANCH), main)" — GIT_BRANCH is never defined in
# this file; the computed $(git.branch) variable above is the intended source.
else ifeq ($(git.branch), main)
build := latest
endif

# If the repo is dirty, we will consider it dev.
# Commit your work if you want consistency
# BUG FIX: was "ifeq ($(GIT_DIRTY),)" which (a) tested an undefined variable
# instead of the computed $(git.dirty), and (b) was inverted — it marked the
# build "dev" exactly when the tree was CLEAN, so every build became dev.
ifneq ($(git.dirty),)
build := dev
endif

include $(ROOT_DIR)/version.mk
include $(ROOT_DIR)/tools.mk

# Sha recipes
%.sha256: %
	cd $(dir $<) && shasum -a 256 $(notdir $<) > $@

%.print-sha: %.sha256
	@cat $<

rules.clean:
	$(RM) -rf $(out_dir)
	$(RM) -rf $(build_dir)

# One-liner: makes "clean" depend on rules.clean AND marks rules.clean phony
# (listing it as a prerequisite of the .PHONY target). Unusual but valid.
clean .PHONY: rules.clean

define INCLUDE_FILE
path = $(1)
include $(1)/Makefile
endef

endif
15 | */ 16 | package com.vmware.tanzu.streaming.runtime.dataschema; 17 | 18 | import org.apache.avro.Schema; 19 | 20 | import org.springframework.stereotype.Component; 21 | import org.springframework.util.Assert; 22 | 23 | @Component 24 | public class InlineAvroToAvroConverter implements DataSchemaAvroConverter { 25 | 26 | public static final String TYPE = "avro"; 27 | 28 | @Override 29 | public String getSupportedDataSchemaType() { 30 | return TYPE; 31 | } 32 | 33 | @Override 34 | public Schema toAvro(DataSchemaProcessingContext context) { 35 | 36 | Assert.isTrue(getSupportedDataSchemaType().equalsIgnoreCase( 37 | context.getDataSchemaContext().getInline().getType()), 38 | String.format("Wrong schema representation: %s for converter type %s", 39 | context.getDataSchemaContext().getInline().getType(), this.getSupportedDataSchemaType())); 40 | 41 | String inlineAvroSchema = context.getDataSchemaContext().getInline().getSchema(); 42 | String jsonAvroSchema = AvroHelper.convertYamlOrJsonToJson(inlineAvroSchema); 43 | return new org.apache.avro.Schema.Parser().parse(jsonAvroSchema); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /streaming-runtime-samples/spring-cloud-stream/streaming-pipeline-ticktock-partitioned-better.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | name: time-source 6 | spec: 7 | type: SCS 8 | outputs: 9 | - name: "timestamps-stream" 10 | template: 11 | spec: 12 | containers: 13 | - name: scdf-time-source-kafka 14 | image: springcloudstream/time-source-kafka:3.2.0 15 | env: 16 | - name: SPRING_CLOUD_STREAM_POLLER_FIXED-DELAY 17 | value: "1000" 18 | - name: TIME_DATE-FORMAT 19 | value: "ss" 20 | # value: "dd/MM/yyyy HH:mm:ss" 21 | --- 22 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 23 | kind: Processor 24 | metadata: 25 | name: transformer 26 | spec: 27 | 
type: SCS 28 | inputs: 29 | - name: "timestamps-stream" 30 | outputs: 31 | - name: "uppercase-stream" 32 | template: 33 | spec: 34 | containers: 35 | - name: scdf-transform-processor-kafka 36 | image: springcloudstream/transform-processor-kafka:3.2.0 37 | env: 38 | - name: SPEL_FUNCTION_EXPRESSION 39 | value: "payload.toUpperCase()" 40 | --- 41 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 42 | kind: Stream 43 | metadata: 44 | name: uppercase-stream 45 | spec: 46 | name: uppercase 47 | protocol: "kafka" 48 | keyExpression: payload 49 | partitionCount: 3 50 | --- 51 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 52 | kind: Processor 53 | metadata: 54 | name: log-sink 55 | spec: 56 | type: SCS 57 | inputs: 58 | - name: "uppercase-stream" 59 | replicas: 3 60 | template: 61 | spec: 62 | containers: 63 | - name: scdf-log-sink-kafka 64 | image: springcloudstream/log-sink-kafka:3.2.0 65 | env: 66 | - name: LOG_EXPRESSION 67 | value: "'My uppercase timestamp is: ' + payload" -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/2.1-multibiner-bridge-production-mode.yaml: -------------------------------------------------------------------------------- 1 | # 2.1 Multibinder Bridge - production env 2 | 3 | ################################################# 4 | # ADMIN responsibility 5 | ################################################# 6 | 7 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 8 | kind: ClusterStream 9 | metadata: 10 | name: kafka-cluster-stream 11 | spec: 12 | name: data-in 13 | streamModes: ["read", "write"] # Note enforced yet 14 | storage: 15 | server: 16 | url: "kafka.default.svc.cluster.local:9092" 17 | protocol: "kafka" 18 | reclaimPolicy: "Retain" 19 | --- 20 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 21 | kind: ClusterStream 22 | metadata: 23 | name: rabbitmq-cluster-stream 24 | spec: 25 | name: data-out 26 | streamModes: ["read", "write"] 27 | storage: 28 | server: 29 | url: 
"rabbitmq.default.svc.cluster.local:5672" 30 | protocol: "rabbitmq" 31 | reclaimPolicy: "Retain" 32 | --- 33 | ################################################# 34 | # DEVELOPER responsibility 35 | ################################################# 36 | 37 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 38 | kind: Stream 39 | metadata: 40 | name: data-in-stream 41 | spec: 42 | name: data-in 43 | protocol: "kafka" 44 | storage: 45 | clusterStream: kafka-cluster-stream # Claims the pre-provisioned Kafka ClusterStream. 46 | --- 47 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 48 | kind: Processor 49 | metadata: 50 | name: multibinder-processor 51 | spec: 52 | type: SRP 53 | inputs: 54 | - name: data-in-stream 55 | outputs: 56 | - name: data-out-stream 57 | --- 58 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 59 | kind: Stream 60 | metadata: 61 | name: data-out-stream 62 | spec: 63 | name: data-out 64 | protocol: "rabbitmq" 65 | storage: 66 | clusterStream: rabbitmq-cluster-stream # Claims the pre-provisioned rabbitmq ClusterStream. 
67 | -------------------------------------------------------------------------------- /streaming-runtime-samples/spring-cloud-stream/streaming-pipeline-ticktock.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Processor 4 | metadata: 5 | # https://docs.spring.io/stream-applications/docs/2021.1.2/reference/html/#spring-cloud-stream-modules-time-source 6 | name: time-source 7 | spec: 8 | type: SCS 9 | outputs: 10 | - name: "timestamps-stream" 11 | template: 12 | spec: 13 | containers: 14 | - name: scdf-time-source-kafka 15 | image: springcloudstream/time-source-kafka:3.2.0 16 | env: 17 | - name: SPRING_CLOUD_STREAM_POLLER_FIXED-DELAY 18 | value: "2000" 19 | - name: TIME_DATE-FORMAT 20 | value: "dd/MM/yyyy HH:mm:ss" 21 | --- 22 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 23 | kind: Processor 24 | metadata: 25 | #https://docs.spring.io/stream-applications/docs/2021.1.2/reference/html/#spring-cloud-stream-modules-transform-processor 26 | name: transformer 27 | spec: 28 | type: SCS 29 | inputs: 30 | - name: "timestamps-stream" 31 | outputs: 32 | - name: "uppercase-stream" 33 | template: 34 | spec: 35 | containers: 36 | - name: scdf-transform-processor-kafka 37 | image: springcloudstream/transform-processor-kafka:3.2.0 38 | env: 39 | - name: SPEL_FUNCTION_EXPRESSION 40 | value: "payload.toUpperCase()" 41 | --- 42 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 43 | kind: Processor 44 | metadata: 45 | #https://docs.spring.io/stream-applications/docs/2021.1.2/reference/html/#spring-cloud-stream-modules-log-sink 46 | name: log-sink 47 | spec: 48 | type: SCS 49 | inputs: 50 | - name: "uppercase-stream" 51 | template: 52 | spec: 53 | containers: 54 | - name: scdf-log-sink-kafka 55 | image: springcloudstream/log-sink-kafka:3.2.0 56 | env: 57 | - name: LOG_EXPRESSION 58 | value: "'My uppercase timestamp is: ' + payload" 59 | 
/**
 * Implementers of this interface can convert some custom Data Schema representation into a common Avro schema.
 */
public interface DataSchemaAvroConverter {

	/**
	 * @return Returns the data schema representation type that this converter supports.
	 */
	String getSupportedDataSchemaType();

	/**
	 * Computes an Avro schema instance from the provided context.
	 *
	 * @param context All stream schema information necessary to compute the Avro schema. The context is mutable,
	 * allowing converters to alter its state!
	 *
	 * @return Returns the Avro schema instance computed from the input context. Mind that the context is mutable as
	 * well, allowing the converter implementation to alter the context's internal structures.
	 */
	Schema toAvro(DataSchemaProcessingContext context);

	/**
	 * Wraps the given schema into a union with the Avro null type, making it nullable.
	 * @param schema Candidate schema.
	 * @return Returns the schema unchanged when it already permits null, otherwise a [null, schema] union.
	 */
	default Schema nullableSchema(Schema schema) {
		return schema.isNullable() ? schema : Schema.createUnion(SchemaBuilder.builder().nullType(), schema);
	}
}
33 | */ 34 | ConcurrentHashMap release(ConcurrentHashMap outputAggregatedState); 35 | 36 | /** 37 | * Callback to augment the target headers to be used for sending the aggregate entries downstream. Same headers are 38 | * used for all messages part of this aggregate. 39 | * @param outputHeaders The message headers to augment; 40 | * @return Returns the augmented target headers. 41 | */ 42 | MessageHeaders headers(MessageHeaders outputHeaders); 43 | } 44 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/protocol/ProtocolDeploymentAdapter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.vmware.tanzu.streaming.runtime.protocol; 17 | 18 | import com.vmware.tanzu.streaming.models.V1alpha1ClusterStream; 19 | import io.kubernetes.client.openapi.ApiException; 20 | import io.kubernetes.client.openapi.models.V1OwnerReference; 21 | 22 | public interface ProtocolDeploymentAdapter { 23 | 24 | /** 25 | * @return Identifier of the binder protocol supported by this deployment editor. 26 | */ 27 | String getProtocolName(); 28 | 29 | /** 30 | * Identifies uniquely the protocol deployment editor. 31 | * It defaults to the protocolName. 
32 | * @return Unique protocol deployment editor identifier. 33 | */ 34 | default String getProtocolDeploymentEditorName() { 35 | return getProtocolName(); 36 | } 37 | 38 | void createIfNotFound(V1OwnerReference ownerReference, String namespace, V1alpha1ClusterStream clusterStream) throws ApiException; 39 | 40 | default void postCreateConfiguration(V1OwnerReference ownerReference, String namespace, 41 | V1alpha1ClusterStream clusterStream) throws ApiException { 42 | } 43 | 44 | boolean isRunning(V1OwnerReference ownerReference, String namespace); 45 | 46 | String getStorageAddress(V1OwnerReference ownerReference, String namespace, boolean isServiceBindingEnabled); 47 | } 48 | -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/processor/window/IdleWindowsWatchdog.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.tanzu.streaming.runtime.srp.processor.window; 17 | 18 | import java.io.IOException; 19 | import java.util.concurrent.ExecutorService; 20 | import java.util.concurrent.Executors; 21 | import java.util.concurrent.Future; 22 | import java.util.concurrent.TimeUnit; 23 | 24 | import javax.annotation.PostConstruct; 25 | import javax.annotation.PreDestroy; 26 | 27 | public class IdleWindowsWatchdog implements AutoCloseable { 28 | 29 | private final ExecutorService executor; 30 | 31 | private final IdleWindowsReleaser idleWindowsReleaser; 32 | 33 | public IdleWindowsWatchdog(IdleWindowsReleaser idleWindowsReleaser) { 34 | this.idleWindowsReleaser = idleWindowsReleaser; 35 | this.executor = Executors.newFixedThreadPool(2); 36 | } 37 | 38 | @PostConstruct 39 | public void initialize() { 40 | Future feature = this.executor.submit(this.idleWindowsReleaser); 41 | } 42 | 43 | @Override 44 | @PreDestroy 45 | public void close() throws IOException { 46 | this.idleWindowsReleaser.stopIdleReleaser(); 47 | try { 48 | this.executor.awaitTermination(1, TimeUnit.SECONDS); 49 | } 50 | catch (InterruptedException e) { 51 | e.printStackTrace(); 52 | } 53 | this.executor.shutdown(); 54 | } 55 | 56 | } 57 | -------------------------------------------------------------------------------- /streaming-runtime-samples/anomaly-detection/light/streaming-pipeline-light.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Stream 3 | metadata: 4 | name: card-authorizations-stream 5 | spec: 6 | name: card-authorizations 7 | protocol: "kafka" 8 | dataSchemaContext: 9 | schema: 10 | namespace: com.tanzu.streaming.runtime.anomaly.detection 11 | name: AuthorizationAttempts 12 | fields: 13 | - name: card_number 14 | type: string 15 | - name: card_type 16 | type: string 17 | - name: card_expiry 18 | type: string 19 | - name: name 20 | type: string 21 | timeAttributes: 22 | # As the message 
payload doesn't have an explicit event-time use the Kafka ingestion time stored in the header as a timestamp. 23 | # Generated watermark uses 3 sec. out-of-orderness tolerance. 24 | - name: header.timestamp 25 | watermark: "`header.timestamp` - INTERVAL '3' SECOND" 26 | --- 27 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 28 | kind: Processor 29 | metadata: 30 | name: fraud-detector 31 | spec: 32 | type: SRP 33 | inputs: 34 | - name: card-authorizations-stream 35 | outputs: 36 | - name: fraud-alert-stream 37 | attributes: 38 | srp.window: 5s 39 | srp.window.idle.timeout: 60s 40 | template: 41 | spec: 42 | containers: 43 | - name: fraud-detection-udf 44 | image: ghcr.io/vmware-tanzu/streaming-runtimes/udf-anomaly-detection-js:latest 45 | --- 46 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 47 | kind: Processor 48 | metadata: 49 | #https://docs.spring.io/stream-applications/docs/2021.1.2/reference/html/#spring-cloud-stream-modules-log-sink 50 | name: fraud-alert 51 | spec: 52 | type: SCS 53 | inputs: 54 | - name: fraud-alert-stream 55 | template: 56 | spec: 57 | containers: 58 | - name: scdf-log-sink-kafka 59 | image: springcloudstream/log-sink-kafka:3.2.0 60 | env: 61 | - name: LOG_EXPRESSION 62 | value: "'Possible fraud transactions: ' + payload" 63 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/config/StreamingRuntimeConfiguration.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.vmware.tanzu.streaming.runtime.config; 17 | 18 | import io.kubernetes.client.extended.controller.Controller; 19 | import io.kubernetes.client.extended.controller.ControllerManager; 20 | import io.kubernetes.client.informer.SharedInformerFactory; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | import org.springframework.boot.CommandLineRunner; 25 | import org.springframework.context.annotation.Bean; 26 | import org.springframework.context.annotation.Configuration; 27 | 28 | @Configuration(proxyBeanMethods = false) 29 | public class StreamingRuntimeConfiguration { 30 | 31 | private static final Logger LOG = LoggerFactory.getLogger(StreamingRuntimeConfiguration.class); 32 | 33 | @Bean(destroyMethod = "shutdown") 34 | ControllerManager controllerManager(SharedInformerFactory sharedInformerFactory, Controller[] controllers) { 35 | return new ControllerManager(sharedInformerFactory, controllers); 36 | } 37 | 38 | @Bean 39 | CommandLineRunner commandLineRunner(ControllerManager controllerManager) { 40 | return args -> { 41 | LOG.info("Start ControllerManager"); 42 | try { 43 | new Thread(controllerManager, "ControllerManager").start(); 44 | } 45 | catch (Exception exception) { 46 | LOG.info("Exit ControllerManager"); 47 | } 48 | }; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/test-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
streaming.tanzu.vmware.com/v1alpha1 2 | kind: ClusterStream 3 | metadata: 4 | name: cluster-stream-kafka-1 5 | spec: 6 | keys: ["truckclass", "truckid"] 7 | streamModes: ["read", "write"] 8 | storage: 9 | server: 10 | url: "http://localhost:8080" 11 | protocol: "kafka" 12 | attributes: 13 | key1: "value1" 14 | reclaimPolicy: "Retain" 15 | --- 16 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 17 | kind: ClusterStream 18 | metadata: 19 | name: cluster-stream-rabbitmq-1 20 | spec: 21 | keys: ["truckclass", "truckid"] 22 | streamModes: ["read", "write"] 23 | storage: 24 | server: 25 | url: "http://localhost:8080" 26 | protocol: "rabbitmq" 27 | attributes: 28 | key1: "value1" 29 | reclaimPolicy: "Retain" 30 | --- 31 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 32 | kind: Stream 33 | metadata: 34 | name: kafka-stream-1 35 | spec: 36 | keys: ["album", "genre"] 37 | streamMode: ["read"] 38 | protocol: "kafka" 39 | storage: 40 | clusterStream: "cluster-stream-kafka-1" 41 | --- 42 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 43 | kind: Stream 44 | metadata: 45 | name: rabbitmq-stream-1 46 | spec: 47 | keys: ["truckclass", "truckid"] 48 | streamMode: ["write"] 49 | protocol: "rabbitmq" 50 | storage: 51 | clusterStream: "cluster-stream-rabbitmq-1" 52 | --- 53 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 54 | kind: Processor 55 | metadata: 56 | name: processor1 57 | spec: 58 | inputs: 59 | sources: 60 | - name: "kafka-stream-1" 61 | outputs: 62 | - name: "rabbitmq-stream-1" 63 | template: 64 | spec: 65 | containers: 66 | - name: uppercase-grpc 67 | image: ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-java:latest 68 | env: 69 | - name: SPRING_CLOUD_FUNCTION_DEFINITION 70 | value: uppercase 71 | - name: SPRING_CLOUD_FUNCTION_GRPC_MODE 72 | value: server 73 | - name: SPRING_CLOUD_FUNCTION_GRPC_PORT 74 | value: "55554" 75 | 76 | 77 | -------------------------------------------------------------------------------- 
/sr-common/src/main/java/com/tanzu/streaming/runtime/processor/common/proto/PayloadCollection.java: -------------------------------------------------------------------------------- 1 | // Generated by the protocol buffer compiler. DO NOT EDIT! 2 | // source: payload_collection.proto 3 | 4 | package com.tanzu.streaming.runtime.processor.common.proto; 5 | 6 | public final class PayloadCollection { 7 | private PayloadCollection() {} 8 | public static void registerAllExtensions( 9 | com.google.protobuf.ExtensionRegistryLite registry) { 10 | } 11 | 12 | public static void registerAllExtensions( 13 | com.google.protobuf.ExtensionRegistry registry) { 14 | registerAllExtensions( 15 | (com.google.protobuf.ExtensionRegistryLite) registry); 16 | } 17 | static final com.google.protobuf.Descriptors.Descriptor 18 | internal_static_GrpcPayloadCollection_descriptor; 19 | static final 20 | com.google.protobuf.GeneratedMessageV3.FieldAccessorTable 21 | internal_static_GrpcPayloadCollection_fieldAccessorTable; 22 | 23 | public static com.google.protobuf.Descriptors.FileDescriptor 24 | getDescriptor() { 25 | return descriptor; 26 | } 27 | private static com.google.protobuf.Descriptors.FileDescriptor 28 | descriptor; 29 | static { 30 | java.lang.String[] descriptorData = { 31 | "\n\030payload_collection.proto\"(\n\025GrpcPayloa" + 32 | "dCollection\022\017\n\007payload\030\001 \003(\014B6\n2com.tanz" + 33 | "u.streaming.runtime.processor.common.pro" + 34 | "toP\001b\006proto3" 35 | }; 36 | descriptor = com.google.protobuf.Descriptors.FileDescriptor 37 | .internalBuildGeneratedFileFrom(descriptorData, 38 | new com.google.protobuf.Descriptors.FileDescriptor[] { 39 | }); 40 | internal_static_GrpcPayloadCollection_descriptor = 41 | getDescriptor().getMessageTypes().get(0); 42 | internal_static_GrpcPayloadCollection_fieldAccessorTable = new 43 | com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 44 | internal_static_GrpcPayloadCollection_descriptor, 45 | new java.lang.String[] { 
"Payload", }); 46 | } 47 | 48 | // @@protoc_insertion_point(outer_class_scope) 49 | } 50 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tests/streaming-pipeline-auto-provisioned-streams.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 2 | kind: Processor 3 | metadata: 4 | name: srp-test-auto-provisioned-streams 5 | spec: 6 | type: SRP 7 | replicas: 1 8 | inputs: 9 | - name: "data-in-stream" 10 | outputs: 11 | - name: "data-out-stream" 12 | attributes: 13 | # The srp.input.schemaRegistryUri attribute can override the input Stream.dataSchemaContext.schemaRegistryUri configuration. 14 | # srp.input.schemaRegistryUri: "http://s-registry.streaming-runtime.svc.cluster.local:8081" 15 | 16 | # The srp.input.timestampExpression attribute can override the input Stream.dataSchemaContext.timeAttributes.name 17 | # or Stream.dataSchemaContext.timeAttributes.watermark configuration. 18 | # srp.input.timestampExpression: "score_time" 19 | 20 | # Can override the out-orderness values extracted from the input Stream.dataSchemaContext.timeAttributes.watermark expression. 21 | #srp.maxOutOfOrderness: 500ms 22 | 23 | #srp.allowedLateness: 0s 24 | 25 | # Setting the srp.window activates the Tumbling Time-Window Aggregation! Not setting it disables the aggregation. 26 | # srp.window: 3s 27 | 28 | # Only used if the srp.window is set 29 | # srp.window.idle.timeout: 30s 30 | 31 | # Generate output headers using other header values (header.) 
32 | # or JsonPath expressions apply over the output message body (applicable for Avro content types as well) 33 | srp.output.headers: "test2=header.watermark;test3=header.watermark" 34 | 35 | # Not defining side-containers is equivalent to set srp.skipUdf = true 36 | # srp.skipUdf: "true" 37 | 38 | template: 39 | spec: 40 | containers: 41 | - name: my-udf 42 | image: ghcr.io/vmware-tanzu/streaming-runtimes/udf-uppercase-go:0.1 43 | # --- 44 | # apiVersion: streaming.tanzu.vmware.com/v1alpha1 45 | # kind: Stream 46 | # metadata: 47 | # name: data-out-stream 48 | # spec: 49 | # name: data-out-stream 50 | # protocol: rabbitmq 51 | # storage: 52 | # clusterStream: data-out-stream-cluster-stream -------------------------------------------------------------------------------- /docs/architecture/processors/scs/overview.md: -------------------------------------------------------------------------------- 1 | # Spring Cloud Stream Processor (SCS) 2 | 3 | Runs [Spring Cloud Stream](https://spring.io/projects/spring-cloud-stream) applications as processors in the pipeline. One can choose for the extensive set (60+) of [pre-built streaming applications](https://dataflow.spring.io/docs/applications/pre-packaged/#stream-applications) or build a custom one. It is possible to build and deploy [polyglot applications](https://dataflow.spring.io/docs/recipes/polyglot/processor/) as long as they interact with the input/output streams manually. 4 | 5 | ## Usage 6 | 7 | ```yaml 8 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 9 | kind: Processor 10 | metadata: 11 | # Name of the source of the Spring Cloud Stream. 12 | # List: https://docs.spring.io/stream-applications/docs/2021.1.2/reference/html/#sources 13 | name: 14 | spec: 15 | # Type of the processor. 
In this case SCS (Spring Cloud Stream) 16 | type: SCS 17 | # Input Stream name for the processor to get data from 18 | inputs: 19 | - name: 20 | # Output Stream name for the processor to send data 21 | outputs: 22 | - name: 23 | template: 24 | spec: 25 | # Container for the Spring Cloud Stream image. 26 | containers: 27 | - name: 28 | image: springcloudstream/: 29 | # List of environment variables that are required for the processor. 30 | env: 31 | ``` 32 | 33 | ## Examples 34 | 35 | ![](../../../samples/spring-cloud-stream/ticktock-deployment.svg) 36 | 37 | - [Spring Cloud Stream pipeline](../../../samples/spring-cloud-stream/tick-tock.md) (SCS) - show how to build streaming pipelines using Spring Cloud Stream application as processors. 38 | - [streaming-pipeline-ticktock-partitioned-better.yaml](https://github.com/vmware-tanzu/streaming-runtimes/blob/main/streaming-runtime-samples/spring-cloud-stream/streaming-pipeline-ticktock-partitioned-better.yaml) example shows how to data-partition the TickTock application leveraging the SCS [Data-Partitioning](../../data-partitioning/data-partitioning.md) capabilities. -------------------------------------------------------------------------------- /docs/samples/overview.md: -------------------------------------------------------------------------------- 1 | 2 | ## Tutorials 3 | 4 | [Step by step](./tutorials.md) tutorials introduce the Streaming Runtime features and how to use them. 5 | 6 | ## Use Cases 7 | 8 | Example use cases below demonstrate how to implement various streaming and event-driven use case scenarios with the help of the `Streaming Runtime`. 9 | 10 | The [setup instructions](./instructions.md) helps to setup the demo infrastructure (e.g. minikube) and to explore the demo results - e.g. exploring the Apache Kafka topics and/or RabbitMQ queues data. 
11 | 12 | * ![anomaly detection logo](./anomaly-detection/anomaly-detection-logo.png){ align=left, width="25"} [Anomaly Detection - FSQL](./anomaly-detection/anomaly-detection.md) (FSQL, SRP)- detect, in real time, suspicious credit card transactions, and extract them for further processing. 13 | * ![anomaly detection logo](./anomaly-detection/anomaly-detection-logo.png){ align=left, width="25"} [Anomaly Detection - SRP](./anomaly-detection/anomaly-detection-srp.md) (SRP, CSC)- detect, in real time, suspicious credit card transactions, and extract them for further processing. 14 | * ![clickstream logo](./clickstream/clickstream-logo.png){ align=left, width="25"} [Clickstream Analysis](clickstream/clickstream.md) (FSQL, SRP) - for an input clickstream stream, we want to know who are the high status customers, currently using the website so that we can engage with them or to find how much they buy or how long they stay on the site that day. 15 | * ![iot logo](./iot-monitoring/iot-logo.png){ align=left, width="20"} [IoT Monitoring analysis](iot-monitoring/iot-monitoring.md) (FSQL, SRP) - real-time analysis of IoT monitoring log. 16 | * ![top-k-songs-logo](./top-k-songs/top-k-songs-logo.png){ align=left, width="20"} [Streaming Music Service](top-k-songs/top-k-songs.md) (FSQL, SRP) - music ranking application that continuously computes the latest Top-K music charts based on song play events collected in real-time. 17 | * [Spring Cloud Stream pipeline](spring-cloud-stream/tick-tock.md) (SCS) - show how to build streaming pipelines using Spring Cloud Stream application as processors. 
18 | * [Online Game Statistics](./online-gaming-statistics/online-gaming-statistics.md) (SRP+UDF) - WIP 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /udf-utilities/streaming-runtime-udf-aggregator-java/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4 | 4.0.0 5 | com.tanzu.streaming.runtime.udf 6 | streaming-runtime-udf-aggregator 7 | 0.0.1-SNAPSHOT 8 | streaming-runtime-udf-aggregator 9 | Demo project for Spring Boot 10 | 11 | 11 12 | 11 13 | 11 14 | 15 | 16 | 17 | com.tanzu.streaming.runtime.processor.common 18 | sr-common 19 | 0.0.1-SNAPSHOT 20 | 21 | 22 | 27 | 28 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | org.apache.maven.plugins 40 | maven-compiler-plugin 41 | 42 | 11 43 | 11 44 | 45 | 46 | 47 | 48 | 49 | 50 | maven-deploy-plugin 51 | 2.8.2 52 | 53 | 54 | 55 | 56 | 57 | 58 | github 59 | GitHub OWNER Apache Maven Packages 60 | https://maven.pkg.github.com/vmware-tanzu/streaming-runtimes 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/6-time-window-aggregation.yaml: -------------------------------------------------------------------------------- 1 | # 6. Tumbling Time-Window Aggregation 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Stream 4 | metadata: 5 | name: data-in-stream 6 | spec: 7 | name: data-in 8 | protocol: kafka 9 | dataSchemaContext: 10 | # Here the schema is used only with descriptive purpose! It is not used to validate the 11 | # stream content. 12 | # Check the FSQL samples to find how to enforce Avro schemas and use schema registries. 
13 | schema: 14 | namespace: sr.poc.online.gaming 15 | name: User 16 | fields: 17 | - name: id 18 | type: string 19 | - name: fullName 20 | type: string 21 | - name: team 22 | type: string 23 | - name: email 24 | type: string 25 | - name: score 26 | type: int 27 | - name: score_time 28 | type: long_timestamp-millis 29 | timeAttributes: 30 | # Data field to be used as an event-time. Generated watermark uses 2 sec. out-of-orderness. 31 | # Note: The Out-Of-Order events are not Late Events! The LateEvents are handled differently. 32 | - name: score_time 33 | watermark: "`score_time` - INTERVAL '2' SECOND" 34 | --- 35 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 36 | kind: Processor 37 | metadata: 38 | name: user-scores-processor 39 | spec: 40 | type: SRP 41 | inputs: 42 | - name: data-in-stream 43 | outputs: 44 | - name: user-scores-stream 45 | attributes: 46 | srp.window: 5s # Tumbling Time Window of 5 seconds. 47 | srp.window.idle.timeout: 60s # Allow partial release of idle time-windows. 48 | srp.lateEventMode: SIDE_CHANNEL # Send late events to a side-channel stream. By default late events are discarded. 49 | template: 50 | spec: 51 | containers: 52 | - name: scores-by-user-javascript 53 | # The UDF implementation is in the './6-user-score-aggregation-js/aggregate.js' 54 | image: ghcr.io/vmware-tanzu/streaming-runtimes/user-score-js:latest 55 | --- 56 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 57 | kind: Stream 58 | metadata: 59 | name: user-scores-stream 60 | spec: 61 | name: user-scores 62 | protocol: kafka 63 | -------------------------------------------------------------------------------- /streaming-runtime-operator/streaming-runtime/src/main/java/com/vmware/tanzu/streaming/runtime/processor/ProcessorAdapter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.vmware.tanzu.streaming.runtime.processor; 17 | 18 | import java.io.IOException; 19 | import java.util.List; 20 | 21 | import com.vmware.tanzu.streaming.models.V1alpha1Processor; 22 | import com.vmware.tanzu.streaming.models.V1alpha1Stream; 23 | import com.vmware.tanzu.streaming.runtime.ProcessorStatusException; 24 | import io.kubernetes.client.openapi.ApiException; 25 | import io.kubernetes.client.openapi.models.V1OwnerReference; 26 | 27 | /** 28 | * Adapter for different types of streaming (CRD) Processor implementations. Uses the V1alpha1Processor attributes to 29 | * configure the different processor types. 30 | */ 31 | public interface ProcessorAdapter { 32 | 33 | /** 34 | * @return Processor adapter type. Currently supported types are: SRP, SCS, FSQL. 35 | */ 36 | String type(); 37 | 38 | /** 39 | * Instantiate the Processor Adapter. Could be 40 | * 41 | * @param processor Processor CRD definition. 42 | * @param ownerReference Processor's resource owner. 43 | * @param inputStreams List of input Streams. 44 | * @param outputStreams List of output Streams. 
45 | * @throws IOException 46 | * @throws ApiException 47 | * @throws ProcessorStatusException 48 | */ 49 | void createProcessor(V1alpha1Processor processor, V1OwnerReference ownerReference, 50 | List inputStreams, List outputStreams) 51 | throws IOException, ApiException, ProcessorStatusException; 52 | } 53 | -------------------------------------------------------------------------------- /srp-processor/src/main/java/com/tanzu/streaming/runtime/srp/processor/window/TumblingWindowService.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2022-2022 the original author or authors. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.tanzu.streaming.runtime.srp.processor.window; 17 | 18 | import java.time.Duration; 19 | import java.util.List; 20 | 21 | import com.tanzu.streaming.runtime.srp.processor.window.state.StateEntry; 22 | 23 | import org.springframework.messaging.support.MessageBuilder; 24 | 25 | public interface TumblingWindowService { 26 | 27 | /** 28 | * 29 | * @param windowStartTime 30 | * @param windowEndTime 31 | * @param windowAggregate 32 | * @param isPartial 33 | * @return 34 | */ 35 | List> computeWindowAggregate(Duration windowStartTime, Duration windowEndTime, 36 | StateEntry windowAggregate, boolean isPartial); 37 | 38 | /** 39 | * Send the collected aggregation downstream. 
40 | * 41 | * @param aggregateEventTime Event time to be used with the message sent. 42 | * @param outputWatermark Watermark to be propagated with the message sent. 43 | * @param messageBuilder Builder with the message aggregate to be sent. 44 | */ 45 | void send(Duration aggregateEventTime, Duration outputWatermark, MessageBuilder messageBuilder); 46 | 47 | void handleLateEvent(Duration aggregateEventTime, Duration outputWatermark, 48 | MessageBuilder messageBuilder); 49 | 50 | void releaseWindow(long windowStartTimeNs, boolean removeWindow, boolean isPartial, boolean isLateEventResend); 51 | 52 | long getOldestWindowId(); 53 | } 54 | -------------------------------------------------------------------------------- /streaming-runtime-samples/tutorials/8-secrets-management-with-service-binding.yaml: -------------------------------------------------------------------------------- 1 | # 8. Secrets Management with Service Binding spec. 2 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 3 | kind: Stream 4 | metadata: 5 | name: data-in-stream 6 | spec: 7 | name: data-in 8 | protocol: "kafka" 9 | --- 10 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 11 | kind: Processor 12 | metadata: 13 | name: multibinder-processor 14 | spec: 15 | type: SRP 16 | inputs: 17 | - name: data-in-stream 18 | outputs: 19 | - name: data-out-stream 20 | --- 21 | apiVersion: streaming.tanzu.vmware.com/v1alpha1 22 | kind: Stream 23 | metadata: 24 | name: data-out-stream 25 | spec: 26 | name: data-out 27 | protocol: "rabbitmq" 28 | # Binding refs a Secret with same name. 
The stream controller uses this binding to configure ClusterStream's auto-creation 29 | binding: "data-out-stream-cluster-stream-default-user" 30 | 31 | attributes: 32 | # Prerequisites to provision RabbitMQ clusters with the "rabbitmq-operator": 33 | # https://vmware-tanzu.github.io/streaming-runtimes/install/#optional-install-rabbitmq-cluster-and-message-topology-operators 34 | protocolAdapterName: "rabbitmq-operator" 35 | --- 36 | apiVersion: servicebinding.io/v1beta1 37 | kind: ServiceBinding 38 | metadata: 39 | name: streaming-runtime-rabbitmq 40 | spec: 41 | service: 42 | apiVersion: v1 43 | kind: Secret 44 | # This is the secret generated by the RabbitMQ Cluster operator. Convention is: 45 | # '-default-user' 46 | # When the ClusterStream is automatically generated from the Stream Operator, the name convention is: 47 | # '-cluster-stream-default-user' 48 | name: data-out-stream-cluster-stream-default-user 49 | workload: 50 | apiVersion: apps/v1 51 | kind: Deployment 52 | # Currently, the convention expects that the workload name is: 'srp-'. 53 | # TODO: Explore generating the ServiceBindings resources in the StreamingRuntime Operator. 54 | name: srp-multibinder-processor 55 | env: 56 | # As we know that this processor uses Spring RabbitMQ configuration, here we pass in the expected conf. 57 | - name: SPRING_RABBITMQ_PASSWORD 58 | key: password 59 | - name: SPRING_RABBITMQ_USERNAME 60 | key: username 61 | --------------------------------------------------------------------------------