├── project
│   ├── build.properties
│   └── plugins.sbt
├── client
│   ├── project
│   │   ├── build.properties
│   │   └── plugins.sbt
│   ├── src
│   │   └── main
│   │       ├── resources
│   │       │   └── application.conf
│   │       ├── protobuf
│   │       │   └── ArtifactState.proto
│   │       └── scala
│   │           └── client
│   │               └── artifactstate
│   │                   ├── ArtifactStateStream.scala
│   │                   └── ArtifactStateForEach.scala
│   ├── build.sbt
│   └── README.md
├── gatling
│   ├── project
│   │   ├── build.properties
│   │   └── plugins.sbt
│   ├── build.sbt
│   ├── src
│   │   └── test
│   │       ├── resources
│   │       │   └── application.conf
│   │       └── scala
│   │           └── com
│   │               └── lightbend
│   │                   └── gatling
│   │                       └── ArtifactStateScenario.scala
│   └── README.md
├── Blog_Model.png
├── K8s
│   ├── nodes
│   │   ├── node-service-account.yaml
│   │   ├── akka-cluster-member-role.yaml
│   │   ├── node-role-binding.yaml
│   │   ├── node-service.yaml
│   │   └── node-deployment.yaml
│   ├── endpoints
│   │   ├── endpoint-service-account.yaml
│   │   ├── endpoint-role-binding.yaml
│   │   ├── endpoint-service.yaml
│   │   └── endpoint-deployment.yaml
│   └── cassandra
│       ├── cassandra-db-service.yaml
│       └── cassandra-db-deployment.yaml
├── microk8s
│   ├── nodes
│   │   ├── node-service-account.yaml
│   │   ├── akka-cluster-member-role.yaml
│   │   ├── node-service.yaml
│   │   ├── node-role-binding.yaml
│   │   ├── node-traefik-ingress.yaml
│   │   └── node-deployment.yaml
│   ├── endpoints
│   │   ├── endpoint-service-account.yaml
│   │   ├── endpoint-role-binding.yaml
│   │   ├── endpoint-service.yaml
│   │   ├── endpoint-traefik-ingress.yaml
│   │   └── endpoint-deployment.yaml
│   ├── dashboard
│   │   ├── dashboard-service-account.yaml
│   │   └── dashboard-cluster-role-binding.yaml
│   ├── monitoring-grafana
│   │   └── grafana-ingress.yaml
│   ├── monitoring-prometheus
│   │   └── prometheus-ingress.yaml
│   └── README.md
├── src
│   ├── main
│   │   ├── scala
│   │   │   └── com
│   │   │       └── lightbend
│   │   │           └── artifactstate
│   │   │               ├── serializer
│   │   │               │   ├── MsgSerializeMarker.scala
│   │   │               │   └── EventSerializeMarker.scala
│   │   │               ├── endpoint
│   │   │               │   ├── JsonFormats.scala
│   │   │               │   ├── ArtifactStatePocAPI.scala
│   │   │               │   ├── GrpcArtifactStateServiceImpl.scala
│   │   │               │   └── ArtifactStateRoutes.scala
│   │   │               ├── actors
│   │   │               │   ├── ClusterListenerActor.scala
│   │   │               │   └── ArtifactStateEntityActor.scala
│   │   │               └── app
│   │   │                   └── StartNode.scala
│   │   ├── resources
│   │   │   ├── telemetry-graphite.conf
│   │   │   ├── telemetry-graphite-mac.conf
│   │   │   ├── telemetry-prometheus.conf
│   │   │   ├── telemetry-elasticsearch.conf
│   │   │   ├── nonsup-endpoint-application-docker.conf
│   │   │   ├── nonsup-endpoint-application.conf
│   │   │   ├── nonsup-endpoint-application-docker-dns.conf
│   │   │   ├── endpoint-application.conf
│   │   │   ├── endpoint-application-docker-dns.conf
│   │   │   ├── endpoint-application-docker.conf
│   │   │   ├── nonsup-cluster-application-base.conf
│   │   │   ├── cluster-application-base.conf
│   │   │   ├── nonsup-endpoint-application-k8s.conf
│   │   │   ├── endpoint-application-k8s.conf
│   │   │   ├── nonsup-endpoint-application-base.conf
│   │   │   ├── logback.xml
│   │   │   ├── nonsup-cluster-application.conf
│   │   │   ├── cluster-application.conf
│   │   │   ├── endpoint-application-base.conf
│   │   │   ├── nonsup-cluster-application-docker.conf
│   │   │   ├── nonsup-cluster-application-docker-dns.conf
│   │   │   ├── cluster-application-docker.conf
│   │   │   ├── cluster-application-docker-dns.conf
│   │   │   ├── nonsup-cluster-application-k8s.conf
│   │   │   ├── cluster-application-k8s.conf
│   │   │   └── telemetry.conf
│   │   └── protobuf
│   │       └── ArtifactState.proto
│   └── multi-jvm
│       └── scala
│           └── akka
│               ├── remote
│               │   └── testkit
│               │       └── STMultiNodeSpec.scala
│               └── cluster
│                   └── typed
│                       └── MultiNodeTypedClusterSpec.scala
├── .gitignore
├── telemetry
│   ├── prometheus
│   │   ├── prometheus-ui-nodeport.yaml
│   │   ├── akka-ep-scraping-targets.yaml
│   │   ├── akka-node-scraping-targets.yaml
│   │   └── akka-cinnmaon-datasource.yaml
│   ├── values.yaml
│   ├── uninstall-akka-grafana-dashboards.sh
│   ├── install-akka-grafana-dashboards.sh
│   └── akka-dashboards
│       ├── akka-remote-nodes.json
│       ├── akka-cluster.json
│       ├── akka-cluster-sharding.json
│       ├── akka-routers.json
│       ├── akka-http-endpoints.json
│       ├── akka-stopwatches.json
│       └── akka-http-clients.json
├── docker-compose-cassandra.yml
├── docker-compose-postgresdb.yml
├── OpenShift-3.x
│   ├── README.md
│   ├── create-persistent-volume.yaml
│   ├── node-deployment.yaml
│   └── endpoint-deployment.yaml
├── OpenShift-4.1
│   ├── README.md
│   ├── create-persistent-volume.yaml
│   ├── node-deployment-no-namespace.yaml
│   ├── node-deployment.yaml
│   └── endpoint-deployment.yaml
├── nonsup-docker-compose.yml
├── DOCKER_DNS_YUGABYTE.md
├── docker-compose.yml
├── ddl-scripts
│   └── create_tables_postgres.sql
├── docker-compose-dns-yugabyte.yml
├── README.md
└── REF.md
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.10.0
--------------------------------------------------------------------------------
/client/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.10.0
--------------------------------------------------------------------------------
/gatling/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.10.0
--------------------------------------------------------------------------------
/gatling/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("io.gatling" % "gatling-sbt" % "4.9.2")
2 |
--------------------------------------------------------------------------------
/Blog_Model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michael-read/akka-typed-distributed-state-blog/HEAD/Blog_Model.png
--------------------------------------------------------------------------------
/K8s/nodes/node-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: nodes-sa
5 |
--------------------------------------------------------------------------------
/microk8s/nodes/node-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: nodes-sa
5 |
--------------------------------------------------------------------------------
/K8s/endpoints/endpoint-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: endpoints-sa
5 |
--------------------------------------------------------------------------------
/microk8s/endpoints/endpoint-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: endpoints-sa
5 |
--------------------------------------------------------------------------------
/microk8s/dashboard/dashboard-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: admin-user
5 | namespace: kube-system
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/serializer/MsgSerializeMarker.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.serializer
2 |
3 | trait MsgSerializeMarker
4 |
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/serializer/EventSerializeMarker.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.serializer
2 |
3 | trait EventSerializeMarker
4 |
--------------------------------------------------------------------------------
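
The two marker traits above are empty on purpose: they exist only to select a serializer. The `serialization-bindings` blocks in `cluster-application-base.conf` and `endpoint-application-base.conf` (later in this listing) bind anything extending them to `jackson-json`. A minimal sketch of the intended use, with illustrative names that are not from the repo:

```scala
import com.lightbend.artifactstate.serializer.{EventSerializeMarker, MsgSerializeMarker}

// Commands travel between cluster nodes, so they are bound to jackson-json.
final case class SetArtifactRead(artifactId: Long, userId: String) extends MsgSerializeMarker

// Persisted events go through the same Jackson serializer on their way to the journal.
final case class ArtifactRead(artifactId: Long, userId: String) extends EventSerializeMarker
```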
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | .idea
3 | .bsp
4 | target/
5 | project/target
6 | akka-diagnostics/
7 | logs
8 | #cassandra volume
9 | data/cassandra-1/
10 | lightbend.sbt
--------------------------------------------------------------------------------
/client/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | akka.grpc.client {
2 | "client.ArtifactStateService" {
3 | host = localhost
4 | port = 8082
5 | use-tls = false
6 | }
7 | }
--------------------------------------------------------------------------------
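
Akka gRPC resolves the block above by client name. A minimal wiring sketch, assuming the `ArtifactStateServiceClient` that sbt-akka-grpc generates from `ArtifactState.proto` (the generated class itself isn't shown in this listing):

```scala
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.grpc.GrpcClientSettings

object ClientWiring {
  implicit val system: ActorSystem[Nothing] = ActorSystem(Behaviors.empty, "ArtifactStateClient")

  // Looks up host, port and use-tls under akka.grpc.client."client.ArtifactStateService"
  val settings: GrpcClientSettings = GrpcClientSettings.fromConfig("client.ArtifactStateService")

  // val client = ArtifactStateServiceClient(settings) // generated by sbt-akka-grpc
}
```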
/src/main/resources/telemetry-graphite.conf:
--------------------------------------------------------------------------------
1 | cinnamon.chmetrics {
2 | reporters += statsd-reporter
3 | statsd-reporter {
4 | host = "localhost"
5 | host = ${?GRAPHITE-SANDBOX}
6 | }
7 | }
--------------------------------------------------------------------------------
/src/main/resources/telemetry-graphite-mac.conf:
--------------------------------------------------------------------------------
1 | cinnamon.chmetrics {
2 | reporters += "statsd-reporter"
3 | statsd-reporter {
4 | channel = tcp
5 | host = "localhost"
6 | host = ${?GRAPHITE-SANDBOX}
7 | }
8 | }
--------------------------------------------------------------------------------
/K8s/nodes/akka-cluster-member-role.yaml:
--------------------------------------------------------------------------------
1 | kind: Role
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: akka-cluster-member
5 | rules:
6 | - apiGroups: [""] # "" indicates the core API group
7 | resources: ["pods"]
8 | verbs: ["get", "watch", "list"]
9 |
--------------------------------------------------------------------------------
/microk8s/nodes/akka-cluster-member-role.yaml:
--------------------------------------------------------------------------------
1 | kind: Role
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: akka-cluster-member
5 | rules:
6 | - apiGroups: [""] # "" indicates the core API group
7 | resources: ["pods"]
8 | verbs: ["get", "watch", "list"]
9 |
--------------------------------------------------------------------------------
/client/project/plugins.sbt:
--------------------------------------------------------------------------------
1 |
2 | resolvers += "Akka library repository".at("https://repo.akka.io/maven")
3 | ThisBuild / libraryDependencySchemes ++= Seq(
4 | "org.scala-lang.modules" %% "scala-xml" % VersionScheme.Always
5 | )
6 |
7 | addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "2.4.3")
--------------------------------------------------------------------------------
/microk8s/nodes/node-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: node
5 | spec:
6 | type: ClusterIP
7 | ports:
8 | - name: akka-mgmt-http
9 | protocol: TCP
10 | port: 8558
11 | targetPort: akka-mgmt-http
12 | selector:
13 | tag: clusternode
14 |
--------------------------------------------------------------------------------
/K8s/nodes/node-role-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: nodes-akka-cluster
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: akka-cluster-member
9 | subjects:
10 | - kind: ServiceAccount
11 | name: nodes-sa
12 |
--------------------------------------------------------------------------------
/microk8s/nodes/node-role-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: nodes-akka-cluster
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: akka-cluster-member
9 | subjects:
10 | - kind: ServiceAccount
11 | name: nodes-sa
12 |
--------------------------------------------------------------------------------
/K8s/endpoints/endpoint-role-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: endpoint-akka-cluster
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: akka-cluster-member
9 | subjects:
10 | - kind: ServiceAccount
11 | name: endpoints-sa
12 |
--------------------------------------------------------------------------------
/K8s/nodes/node-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: node
5 | spec:
6 | type: NodePort
7 | ports:
8 | - name: akka-mgmt-http
9 | protocol: TCP
10 | port: 8558
11 | targetPort: akka-mgmt-http
12 | nodePort: 30558
13 | selector:
14 | tag: clusternode
15 |
--------------------------------------------------------------------------------
/gatling/build.sbt:
--------------------------------------------------------------------------------
1 | ThisBuild / scalaVersion := "2.13.14"
2 |
3 | lazy val gatlingVersion = "3.12.0"
4 |
5 | enablePlugins(GatlingPlugin)
6 |
7 | libraryDependencies ++= Seq(
8 | "io.gatling.highcharts" % "gatling-charts-highcharts" % gatlingVersion,
9 | "io.gatling" % "gatling-test-framework" % gatlingVersion
10 | )
11 |
--------------------------------------------------------------------------------
/microk8s/endpoints/endpoint-role-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: endpoint-akka-cluster
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: akka-cluster-member
9 | subjects:
10 | - kind: ServiceAccount
11 | name: endpoints-sa
12 |
--------------------------------------------------------------------------------
/gatling/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | loadtest {
2 | # provides the base URL: http://localhost:8082
3 | baseUrl = "http://localhost:8082"
4 | # sample baseURL when running locally in docker
5 | # baseUrl = "http://172.19.0.6:8082"
6 | # sample baseURL when running locally in minikube
7 | # baseUrl = "http://mk.local:30082"
8 | }
--------------------------------------------------------------------------------
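
`ArtifactStateScenario.scala` isn't reproduced in this listing, so as a hypothetical illustration only, here is how a Gatling simulation can pick up `loadtest.baseUrl` from this file (class and request names are placeholders):

```scala
import com.typesafe.config.ConfigFactory
import io.gatling.core.Predef._
import io.gatling.http.Predef._

class SetArtifactReadSimulation extends Simulation {
  // read the target service URL from src/test/resources/application.conf
  private val baseUrl = ConfigFactory.load().getString("loadtest.baseUrl")

  private val httpProtocol = http.baseUrl(baseUrl).contentTypeHeader("application/json")

  private val scn = scenario("setArtifactReadByUser")
    .exec(
      http("setArtifactReadByUser")
        .post("/artifactState/setArtifactReadByUser")
        .body(StringBody("""{"artifactId":1, "userId":"Michael"}"""))
    )

  setUp(scn.inject(atOnceUsers(10))).protocols(httpProtocol)
}
```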
/src/main/resources/telemetry-prometheus.conf:
--------------------------------------------------------------------------------
1 | akka.stdout-loglevel = info
2 | cinnamon.prometheus {
3 | exporters += http-server
4 | http-server {
5 | host = "0.0.0.0"
6 | port = 9001
7 | }
8 | }
9 |
10 | # turn off reporter since we're not using Elasticsearch here
11 | cinnamon.chmetrics {
12 | reporters += nop-reporter
13 | }
14 |
--------------------------------------------------------------------------------
/microk8s/monitoring-grafana/grafana-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: grafana-http-ingress
5 | namespace: monitoring
6 | annotations:
7 | nginx.ingress.kubernetes.io/rewrite-target: /
8 | spec:
9 | defaultBackend:
10 | service:
11 | name: grafana
12 | port:
13 | number: 3000
--------------------------------------------------------------------------------
/microk8s/dashboard/dashboard-cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: admin-user
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: cluster-admin
9 | subjects:
10 | - kind: ServiceAccount
11 | name: admin-user
12 | namespace: kube-system
--------------------------------------------------------------------------------
/microk8s/monitoring-prometheus/prometheus-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: prometheus-http-ingress
5 | namespace: monitoring
6 | annotations:
7 | nginx.ingress.kubernetes.io/rewrite-target: /
8 | spec:
9 | defaultBackend:
10 | service:
11 | name: prometheus-operated
12 | port:
13 | number: 9090
--------------------------------------------------------------------------------
/telemetry/prometheus/prometheus-ui-nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: prometheus-ui
5 | namespace: monitoring
6 | spec:
7 | type: NodePort
8 | ports:
9 | - name: web
10 | nodePort: 30900
11 | port: 9090
12 | protocol: TCP
13 | targetPort: http-web
14 | selector:
15 | app.kubernetes.io/name: prometheus
16 |
--------------------------------------------------------------------------------
/microk8s/endpoints/endpoint-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: endpoint
5 | spec:
6 | type: ClusterIP
7 | ports:
8 | - name: "8082"
9 | protocol: TCP
10 | port: 8082
11 | targetPort: 8082
12 | - name: akka-mgmt-http
13 | protocol: TCP
14 | port: 8558
15 | targetPort: akka-mgmt-http
16 | selector:
17 | tag: endpoint
18 |
--------------------------------------------------------------------------------
/microk8s/nodes/node-traefik-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: node
5 | annotations:
6 | kubernetes.io/ingress.class: traefik
7 | spec:
8 | rules:
9 | - http:
10 | paths:
11 | - path: /cluster
12 | pathType: Prefix
13 | backend:
14 | service:
15 | name: node
16 | port:
17 | number: 8558
--------------------------------------------------------------------------------
/src/main/resources/telemetry-elasticsearch.conf:
--------------------------------------------------------------------------------
1 | cinnamon.chmetrics {
2 | reporters += elasticsearch-reporter
3 | reporters += "jmx-reporter"
4 | elasticsearch-reporter {
5 | hosts = [${cinnamon.elastic-hosts}]
6 | basic-auth {
7 | username = "elastic"
8 | username = ${?CINNAMON_ELASTIC_USERNAME}
9 | password = "changeme"
10 | password = ${?CINNAMON_ELASTIC_PASSWORD}
11 | }
12 | frequency = 10s
13 | }
14 | }
--------------------------------------------------------------------------------
/K8s/endpoints/endpoint-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: endpoint
5 | spec:
6 | type: NodePort
7 | ports:
8 | - name: "8082"
9 | protocol: TCP
10 | port: 8082
11 | targetPort: 8082
12 | nodePort: 30082
13 | - name: akka-mgmt-http
14 | protocol: TCP
15 | port: 8558
16 | targetPort: akka-mgmt-http
17 | nodePort: 30559
18 | selector:
19 | tag: endpoint
20 |
--------------------------------------------------------------------------------
/docker-compose-cassandra.yml:
--------------------------------------------------------------------------------
1 |
2 | services:
3 | cassandra_db:
4 | hostname: cassandra-1
5 | image: cassandra:3.11.11
6 | command: /bin/bash -c "sleep 1 && echo ' -- Pausing to let system catch up ... -->' && /docker-entrypoint.sh cassandra -f"
7 | ports:
8 | - "7000:7000"
9 | - "7001:7001"
10 | - "7199:7199"
11 | - "9042:9042"
12 | - "9160:9160"
13 | # volumes:
14 | # - ./data/cassandra-1:/var/lib/cassandra:rw
15 |
16 |
--------------------------------------------------------------------------------
/docker-compose-postgresdb.yml:
--------------------------------------------------------------------------------
1 |
2 | networks:
3 | akka:
4 | driver: bridge
5 |
6 | services:
7 | postgres-db:
8 | image: postgres:latest
9 | volumes:
10 | - ./ddl-scripts/create_tables_postgres.sql:/docker-entrypoint-initdb.d/10-init.sql
11 | ports:
12 | - 5432:5432
13 | environment:
14 | POSTGRES_DB: postgres
15 | POSTGRES_USER: postgres
16 | POSTGRES_PASSWORD: postgres
17 | networks:
18 | - akka
19 |
--------------------------------------------------------------------------------
/microk8s/endpoints/endpoint-traefik-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: endpoint
5 | annotations:
6 | kubernetes.io/ingress.class: traefik
7 | spec:
8 | rules:
9 | - http:
10 | paths:
11 | - path: /
12 | pathType: Prefix
13 | backend:
14 | service:
15 | name: endpoint
16 | port:
17 | number: 8082
--------------------------------------------------------------------------------
/telemetry/values.yaml:
--------------------------------------------------------------------------------
1 | grafana:
2 | sidecar:
3 | datasources:
4 | enabled: true
5 | label: grafana_datasource
6 | searchNamespace: ALL
7 | dashboards:
8 | enabled: true
9 | label: grafana_dashboard
10 | searchNamespace: ALL
11 | ingress:
12 | enabled: true
13 | annotations:
14 | spec.ingressClassName : nginx
15 | hosts:
16 | - mk.local # add this to your /etc/hosts file with the actual "minikube ip"
--------------------------------------------------------------------------------
/telemetry/prometheus/akka-ep-scraping-targets.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: PodMonitor
3 | metadata:
4 | name: artifact-state-ep-scraping
5 | labels:
6 | app.kubernetes.io/instance: prometheus-operator
7 | release: prometheus-operator
8 | annotations:
9 | kubectl.kubernetes.io/last-applied-configuration: ""
10 | spec:
11 | selector:
12 | matchLabels:
13 | app: ArtifactStateEndpoint
14 | podMetricsEndpoints:
15 | - port: node-metrics
--------------------------------------------------------------------------------
/telemetry/prometheus/akka-node-scraping-targets.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: PodMonitor
3 | metadata:
4 | name: artifact-state-node-scraping
5 | labels:
6 | app.kubernetes.io/instance: prometheus-operator
7 | release: prometheus-operator
8 | annotations:
9 | kubectl.kubernetes.io/last-applied-configuration: ""
10 | spec:
11 | selector:
12 | matchLabels:
13 | app: ArtifactStateCluster
14 | podMetricsEndpoints:
15 | - port: node-metrics
--------------------------------------------------------------------------------
/telemetry/prometheus/akka-cinnmaon-datasource.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: akka-cinnamon-datasource
5 | labels:
6 | grafana_datasource: "1"
7 | app: prometheus-operator-grafana
8 | data:
9 | akka-datasource.yaml: |-
10 | apiVersion: 1
11 | datasources:
12 | - name: Cinnamon Prometheus
13 | type: prometheus
14 | url: http://prometheus-operator-kube-p-prometheus.monitoring:9090
15 | access: proxy
16 | isDefault: false
17 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | resolvers += "Akka library repository".at("https://repo.akka.io/maven")
2 |
3 | addSbtPlugin("com.github.sbt" % "sbt-multi-jvm" % "0.6.0")
4 |
5 | ThisBuild / libraryDependencySchemes ++= Seq(
6 | "org.scala-lang.modules" %% "scala-xml" % VersionScheme.Always
7 | )
8 |
9 | addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "2.4.3")
10 |
11 | addSbtPlugin("com.lightbend.cinnamon" % "sbt-cinnamon" % "2.20.3")
12 |
13 | addSbtPlugin("com.github.sbt" % "sbt-native-packager" % "1.10.0")
--------------------------------------------------------------------------------
/gatling/README.md:
--------------------------------------------------------------------------------
1 | > [!IMPORTANT]
2 | > You'll need to send a single event to Cassandra first so the needed schemas are created automatically before applying any load; otherwise, persisting events will fail.
3 | > For example,
4 |
5 | ```curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/setArtifactReadByUser```
6 |
7 | > and then verify with:
8 |
9 | ```curl 'http://localhost:8082/artifactState/getAllStates?artifactId=1&userId=Michael' | python3 -m json.tool```
10 |
--------------------------------------------------------------------------------
/OpenShift-3.x/README.md:
--------------------------------------------------------------------------------
1 | # Deploying to Minishift / OpenShift
2 |
3 | Prerequisite: Install Lightbend Console
4 |
5 | 1. create a project w/
6 | ```oc new-project poc```
7 | 2. Deploy a single C* Node by downloading `https://github.com/keedio/openshift-cassandra` and then:
8 | ```
9 | oc apply -f template.yaml
10 | oc apply -f deploy.yaml
11 | ```
12 | 3. Deploy the two node types:
13 | ```
14 | oc apply -f Openshift/node-deployment.yaml
15 | oc apply -f Openshift/endpoint-deployment.yaml
16 | ```
17 |
18 |
19 | ### Update: 2021-08-04
20 | - NOTE: not tested with OpenShift
--------------------------------------------------------------------------------
/OpenShift-4.1/README.md:
--------------------------------------------------------------------------------
1 | # Deploying to Minishift / OpenShift
2 |
3 | Prerequisite: Install Lightbend Console
4 |
5 | 1. create a project w/
6 | ```oc new-project poc```
7 | 2. Deploy a single C* Node by downloading `https://github.com/keedio/openshift-cassandra` and then:
8 | ```
9 | oc apply -f template.yaml
10 | oc apply -f deploy.yaml
11 | ```
12 | 3. Deploy the two node types:
13 | ```
14 | oc apply -f Openshift/node-deployment.yaml
15 | oc apply -f Openshift/endpoint-deployment.yaml
16 | ```
17 |
18 |
19 |
20 | ### Update: 2021-08-04
21 | - NOTE: not tested with OpenShift
--------------------------------------------------------------------------------
/OpenShift-3.x/create-persistent-volume.yaml:
--------------------------------------------------------------------------------
1 | # to create a persistent volume
2 | # minishift ssh
3 | #[docker@minishift ~]$ sudo -i
4 | # mkdir -p /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv
5 | # mkdir /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv/registry
6 | # chmod 777 -R /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv
7 | # exit / exit
8 | # oc create -f create-persistent-volume.yaml
9 | apiVersion: v1
10 | kind: PersistentVolume
11 | metadata:
12 | name: app3-pv
13 | spec:
14 | capacity:
15 | storage: 5Gi
16 | accessModes:
17 | - ReadWriteOnce
18 | hostPath:
19 | path: /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv/registry
20 |
--------------------------------------------------------------------------------
/OpenShift-4.1/create-persistent-volume.yaml:
--------------------------------------------------------------------------------
1 | # to create a persistent volume
2 | # minishift ssh
3 | #[docker@minishift ~]$ sudo -i
4 | # mkdir -p /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv
5 | # mkdir /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv/registry
6 | # chmod 777 -R /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv
7 | # exit / exit
8 | # oc create -f create-persistent-volume.yaml
9 | apiVersion: v1
10 | kind: PersistentVolume
11 | metadata:
12 | name: app3-pv
13 | spec:
14 | capacity:
15 | storage: 5Gi
16 | accessModes:
17 | - ReadWriteOnce
18 | hostPath:
19 | path: /mnt/sda1/var/lib/minishift/openshift.local.volumes/pv/registry
20 |
--------------------------------------------------------------------------------
/K8s/cassandra/cassandra-db-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | kompose.cmd: kompose convert
6 | kompose.version: 1.16.0 (0c01309)
7 | creationTimestamp: null
8 | labels:
9 | io.kompose.service: cassandra-db
10 | name: cassandra-db
11 | spec:
12 | ports:
13 | - name: "7000"
14 | port: 7000
15 | targetPort: 7000
16 | - name: "7001"
17 | port: 7001
18 | targetPort: 7001
19 | - name: "7199"
20 | port: 7199
21 | targetPort: 7199
22 | - name: "9042"
23 | port: 9042
24 | targetPort: 9042
25 | - name: "9160"
26 | port: 9160
27 | targetPort: 9160
28 | selector:
29 | io.kompose.service: cassandra-db
30 | status:
31 | loadBalancer: {}
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/endpoint/JsonFormats.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.endpoint
2 |
3 | import com.lightbend.artifactstate.endpoint.ArtifactStatePocAPI._
4 | import spray.json.{DefaultJsonProtocol, RootJsonFormat}
5 |
6 | object JsonFormats {
7 | // import the default encoders for primitive types (Int, String, Lists etc)
8 | import DefaultJsonProtocol._
9 |
10 | implicit val userJsonFormat: RootJsonFormat[ArtifactAndUser] = jsonFormat2(ArtifactAndUser)
11 | implicit val psResponse: RootJsonFormat[ExtResponse] = jsonFormat4(ExtResponse)
12 | implicit val psResponseII: RootJsonFormat[AllStatesResponse] = jsonFormat5(AllStatesResponse)
13 | implicit val cmdResponse: RootJsonFormat[CommandResponse] = jsonFormat1(CommandResponse)
14 |
15 | }
16 |
17 |
--------------------------------------------------------------------------------
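
These implicits are what the HTTP routes rely on, via Akka HTTP's spray-json support, to (un)marshal the route payloads. A standalone sketch of what they buy you:

```scala
import spray.json._
import com.lightbend.artifactstate.endpoint.ArtifactStatePocAPI._
import com.lightbend.artifactstate.endpoint.JsonFormats._

object JsonFormatsExample extends App {
  // marshal a request body accepted by the HTTP routes
  val json: String = ArtifactAndUser(1L, "Michael").toJson.compactPrint
  println(json) // {"artifactId":1,"userId":"Michael"}

  // and unmarshal it back into the case class
  val user: ArtifactAndUser = json.parseJson.convertTo[ArtifactAndUser]
  println(user)
}
```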
/src/main/resources/nonsup-endpoint-application-docker.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-endpoint-application-base.conf"
2 |
3 | akka {
4 |
5 | remote {
6 | artery {
7 | canonical.port = 2552
8 | }
9 | }
10 |
11 | #discovery-config
12 | discovery {
13 | method = akka-dns
14 | }
15 | #discovery-config
16 |
17 | #management-config
18 | management.cluster.bootstrap {
19 | contact-point-discovery.service-name = cluster
20 | }
21 | #management-config
22 |
23 | cluster {
24 | roles=["endpoint", "dns"]
25 | shutdown-after-unsuccessful-join-seed-nodes = 40s
26 | }
27 |
28 | coordinated-shutdown.exit-jvm = on
29 |
30 | }
31 |
32 | clustering {
33 | ip = "127.0.0.1"
34 | ip = ${?CLUSTER_IP}
35 | ports = ${?CLUSTER_PORTS}
36 | defaultPort = 2552
37 |
38 | cluster.name = ArtifactStateCluster
39 | }
--------------------------------------------------------------------------------
/src/main/resources/nonsup-endpoint-application.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-endpoint-application-base.conf"
2 |
3 | akka {
4 |
5 | remote {
6 | artery {
7 | canonical.hostname = ${clustering.ip}
8 | canonical.port = ${clustering.port}
9 | }
10 | }
11 |
12 |
13 | cluster {
14 | seed-nodes = [
15 | # "akka.tcp://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
16 | # artery protocol
17 | "akka://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
18 | ]
19 | }
20 |
21 | coordinated-shutdown.terminate-actor-system = on
22 |
23 | }
24 |
25 | clustering {
26 | ip = "127.0.0.1"
27 | port = 2551
28 | defaultPort = ${clustering.seed-port}
29 | seed-ip = "127.0.0.1"
30 | seed-port = 2552
31 | cluster.name = ArtifactStateCluster
32 | }
33 |
--------------------------------------------------------------------------------
/src/multi-jvm/scala/akka/remote/testkit/STMultiNodeSpec.scala:
--------------------------------------------------------------------------------
1 | package akka.remote.testkit
2 |
3 | import org.scalatest.matchers.should.Matchers
4 | import org.scalatest.BeforeAndAfterAll
5 | import org.scalatest.wordspec.AnyWordSpecLike
6 | /**
7 | * Hooks up MultiNodeSpec with ScalaTest
8 | */
9 | trait STMultiNodeSpec extends MultiNodeSpecCallbacks with AnyWordSpecLike with Matchers with BeforeAndAfterAll {
10 | self: MultiNodeSpec =>
11 |
12 | override def beforeAll() = multiNodeSpecBeforeAll()
13 |
14 | override def afterAll() = multiNodeSpecAfterAll()
15 |
16 | // Might not be needed anymore if we find a nice way to tag all logging from a node
17 | override implicit def convertToWordSpecStringWrapper(s: String): WordSpecStringWrapper =
18 | new WordSpecStringWrapper(s"$s (on node '${self.myself.name}', $getClass)")
19 | }
20 |
--------------------------------------------------------------------------------
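
A `MultiNodeSpec` is always driven by a `MultiNodeConfig`. For orientation, a minimal hypothetical two-node config that a spec mixing in `STMultiNodeSpec` could be built on (illustrative, not from the repo):

```scala
import akka.remote.testkit.MultiNodeConfig
import com.typesafe.config.ConfigFactory

object TwoNodeConfig extends MultiNodeConfig {
  val first = role("first")
  val second = role("second")

  // config shared by every node in the test
  commonConfig(ConfigFactory.parseString("""
    akka.actor.provider = cluster
    akka.remote.artery.enabled = on
  """))
}
```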
/src/main/resources/nonsup-endpoint-application-docker-dns.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-endpoint-application-base.conf"
2 |
3 | akka {
4 |
5 | diagnostics {
6 | recorder.enabled = off
7 | management.enabled = true
8 | }
9 |
10 | remote {
11 | artery {
12 | canonical.port = 2552
13 | }
14 | }
15 |
16 | #discovery-config
17 | discovery {
18 | method = akka-dns
19 | }
20 | #discovery-config
21 |
22 | #management-config
23 | management.cluster.bootstrap {
24 | contact-point-discovery.service-name = cluster
25 | }
26 | #management-config
27 |
28 | cluster {
29 | roles=["endpoint", "dns"]
30 | shutdown-after-unsuccessful-join-seed-nodes = 40s
31 | }
32 |
33 | coordinated-shutdown.exit-jvm = on
34 |
35 | }
36 |
37 | clustering {
38 | port = 2552
39 | defaultPort = ${clustering.port}
40 | }
41 |
42 |
43 |
--------------------------------------------------------------------------------
/src/main/resources/endpoint-application.conf:
--------------------------------------------------------------------------------
1 | include "endpoint-application-base.conf"
2 | include "telemetry-graphite.conf"
3 |
4 | akka {
5 |
6 | remote {
7 | artery {
8 | canonical.hostname = ${clustering.ip}
9 | canonical.port = ${clustering.port}
10 | }
11 | }
12 |
13 |
14 | cluster {
15 | seed-nodes = [
16 | # "akka.tcp://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
17 | # artery protocol
18 | "akka://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
19 | ]
20 | }
21 |
22 | coordinated-shutdown.terminate-actor-system = on
23 |
24 | }
25 |
26 | clustering {
27 | ip = "127.0.0.1"
28 | port = 2551
29 | defaultPort = ${clustering.seed-port}
30 | seed-ip = "127.0.0.1"
31 | seed-port = 2552
32 | cluster.name = ArtifactStateCluster
33 | }
34 |
--------------------------------------------------------------------------------
/src/main/resources/endpoint-application-docker-dns.conf:
--------------------------------------------------------------------------------
1 | include "endpoint-application-base.conf"
2 | include "telemetry-graphite.conf"
3 |
4 | akka {
5 |
6 | diagnostics {
7 | recorder.enabled = off
8 | management.enabled = true
9 | }
10 |
11 | remote {
12 | artery {
13 | canonical.port = 2552
14 | }
15 | }
16 |
17 | #discovery-config
18 | discovery {
19 | method = akka-dns
20 | }
21 | #discovery-config
22 |
23 | #management-config
24 | management.cluster.bootstrap {
25 | contact-point-discovery.service-name = cluster
26 | }
27 | #management-config
28 |
29 | cluster {
30 | roles=["endpoint", "dns"]
31 | shutdown-after-unsuccessful-join-seed-nodes = 40s
32 | }
33 |
34 | coordinated-shutdown.exit-jvm = on
35 |
36 | }
37 |
38 | clustering {
39 | port = 2552
40 | defaultPort = ${clustering.port}
41 | }
42 |
43 |
44 |
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/endpoint/ArtifactStatePocAPI.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.endpoint
2 |
3 | // these are just for the JSON formats/external protocol/api
4 | object ArtifactStatePocAPI {
5 |
6 | final case class ArtifactAndUser(artifactId: Long, userId: String)
7 |
8 | sealed trait ExtResponses
9 | final case class ExtResponse(artifactId: Long, userId: String, answer: Option[Boolean], failureMsg: Option[String]) extends ExtResponses
10 | final case class AllStatesResponse(
11 | artifactId: Long,
12 | userId: String,
13 | artifactRead: Option[Boolean],
14 | artifactInUserFeed: Option[Boolean],
15 | failureMsg: Option[String]) extends ExtResponses
16 | final case class CommandResponse(success: Boolean) extends ExtResponses
17 |
18 | }
--------------------------------------------------------------------------------
/src/main/resources/endpoint-application-docker.conf:
--------------------------------------------------------------------------------
1 | include "endpoint-application-base.conf"
2 | include "telemetry-graphite.conf"
3 |
4 | akka {
5 |
6 | diagnostics {
7 | recorder.enabled = off
8 | management.enabled = true
9 | }
10 |
11 | remote {
12 | artery {
13 | canonical.port = 2552
14 | }
15 | }
16 |
17 | #discovery-config
18 | discovery {
19 | method = akka-dns
20 | }
21 | #discovery-config
22 |
23 | #management-config
24 | management.cluster.bootstrap {
25 | contact-point-discovery.service-name = cluster
26 | }
27 | #management-config
28 |
29 | cluster {
30 | roles=["endpoint", "dns"]
31 | shutdown-after-unsuccessful-join-seed-nodes = 40s
32 | }
33 |
34 | coordinated-shutdown.exit-jvm = on
35 |
36 | }
37 |
38 | clustering {
39 | ip = "127.0.0.1"
40 | ip = ${?CLUSTER_IP}
41 | ports = ${?CLUSTER_PORTS}
42 | defaultPort = 2552
43 |
44 | cluster.name = ArtifactStateCluster
45 | }
--------------------------------------------------------------------------------
/src/main/resources/nonsup-cluster-application-base.conf:
--------------------------------------------------------------------------------
1 |
2 | akka {
3 | loglevel = INFO
4 |
5 | actor {
6 | provider = cluster
7 | serialization-bindings {
8 | "com.lightbend.artifactstate.serializer.EventSerializeMarker" = jackson-json
9 | "com.lightbend.artifactstate.serializer.MsgSerializeMarker" = jackson-json
10 | }
11 | }
12 |
13 | remote {
14 | artery {
15 | enabled = on
16 | transport = tcp
17 | }
18 | }
19 |
20 | cluster {
21 | roles=["sharded"]
22 | sharding {
23 | number-of-shards = 30
24 | passivate-idle-entity-after = 2 minutes
25 | role = "sharded"
26 | }
27 | downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
28 | split-brain-resolver.active-strategy=keep-majority
29 | split-brain-resolver.keep-majority {
30 | # if the 'role' is defined the decision is based only on members with that 'role'
31 | role = "sharded"
32 | }
33 | }
34 | }
35 |
36 | clustering {
37 | cluster.name = ArtifactStateCluster
38 | }
--------------------------------------------------------------------------------
/src/main/resources/cluster-application-base.conf:
--------------------------------------------------------------------------------
1 | include "telemetry.conf"
2 |
3 | akka {
4 | loglevel = INFO
5 |
6 | actor {
7 | provider = cluster
8 | serialization-bindings {
9 | "com.lightbend.artifactstate.serializer.EventSerializeMarker" = jackson-json
10 | "com.lightbend.artifactstate.serializer.MsgSerializeMarker" = jackson-json
11 | }
12 | }
13 |
14 | remote {
15 | artery {
16 | enabled = on
17 | transport = tcp
18 | }
19 | }
20 |
21 | cluster {
22 | roles=["sharded"]
23 | sharding {
24 | number-of-shards = 30
25 | passivate-idle-entity-after = 2 minutes
26 | role = "sharded"
27 | }
28 |
29 | downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
30 | split-brain-resolver.active-strategy=keep-majority
31 | split-brain-resolver.keep-majority {
32 | # if the 'role' is defined the decision is based only on members with that 'role'
33 | role = "sharded"
34 | }
35 | }
36 | }
37 |
38 | clustering {
39 | cluster.name = ArtifactStateCluster
40 | }
--------------------------------------------------------------------------------
/client/build.sbt:
--------------------------------------------------------------------------------
1 |
2 | name := "artifact-state-scala-grpc-client"
3 |
4 | ThisBuild / version := "1.2"
5 | ThisBuild / scalaVersion := "2.13.14"
6 | ThisBuild / resolvers += "Akka library repository".at("https://repo.akka.io/maven")
7 |
8 | lazy val akkaHttpVersion = "10.6.3"
9 | lazy val akkaVersion = "2.9.5"
10 |
11 | enablePlugins(AkkaGrpcPlugin)
12 |
13 | libraryDependencies ++= Seq(
14 | "com.typesafe.akka" %% "akka-actor-typed" % akkaVersion,
15 | "com.typesafe.akka" %% "akka-stream" % akkaVersion,
16 | "com.typesafe.akka" %% "akka-discovery" % akkaVersion,
17 | "com.typesafe.akka" %% "akka-pki" % akkaVersion,
18 |
19 | // The Akka HTTP overrides are required because Akka-gRPC depends on 10.1.x
20 | "com.typesafe.akka" %% "akka-http" % akkaHttpVersion,
21 | // "com.typesafe.akka" %% "akka-http2-support" % akkaHttpVersion,
22 |
23 | "com.typesafe.akka" %% "akka-actor-testkit-typed" % akkaVersion % Test,
24 | "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion % Test,
25 | "org.scalatest" %% "scalatest" % "3.1.1" % Test
26 | )
27 |
28 | run / fork := true
--------------------------------------------------------------------------------
/K8s/cassandra/cassandra-db-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | annotations:
5 | kompose.cmd: kompose convert
6 | kompose.version: 1.16.0 (0c01309)
7 | creationTimestamp: null
8 | labels:
9 | io.kompose.service: cassandra-db
10 | name: cassandra-db
11 | spec:
12 | replicas: 1
13 | selector:
14 | matchLabels:
15 | io.kompose.service: cassandra-db
16 | strategy: {}
17 | template:
18 | metadata:
19 | creationTimestamp: null
20 | labels:
21 | io.kompose.service: cassandra-db
22 | spec:
23 | containers:
24 | - args:
25 | - /bin/bash
26 | - -c
27 | - sleep 1 && echo ' -- Pausing to let system catch up ... -->' && /docker-entrypoint.sh
28 | cassandra -f
29 | image: cassandra:3.11.11
30 | name: cassandra-db
31 | ports:
32 | - containerPort: 7000
33 | - containerPort: 7001
34 | - containerPort: 7199
35 | - containerPort: 9042
36 | - containerPort: 9160
37 | resources: {}
38 | hostname: cassandra-1
39 | restartPolicy: Always
40 | status: {}
--------------------------------------------------------------------------------
/telemetry/uninstall-akka-grafana-dashboards.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | kubectl -n monitoring delete cm akka-actors
3 | kubectl -n monitoring delete cm akka-ask-pattern
4 | kubectl -n monitoring delete cm akka-circuit-breakers
5 | kubectl -n monitoring delete cm akka-cluster
6 | kubectl -n monitoring delete cm akka-cluster-sharding
7 | kubectl -n monitoring delete cm akka-dispatchers
8 | kubectl -n monitoring delete cm akka-events
9 | kubectl -n monitoring delete cm akka-http-clients
10 | kubectl -n monitoring delete cm akka-http-endpoints
11 | kubectl -n monitoring delete cm akka-http-servers
12 | kubectl -n monitoring delete cm akka-persistence
13 | kubectl -n monitoring delete cm akka-projections
14 | kubectl -n monitoring delete cm akka-remote-actors
15 | kubectl -n monitoring delete cm akka-remote-nodes
16 | kubectl -n monitoring delete cm akka-routers
17 | kubectl -n monitoring delete cm akka-stopwatches
18 | kubectl -n monitoring delete cm akka-streams-extended
19 | kubectl -n monitoring delete cm akka-streams
20 | kubectl -n monitoring delete cm java-futures
21 | kubectl -n monitoring delete cm jvm-metrics
22 | kubectl -n monitoring delete cm scala-futures
23 |
--------------------------------------------------------------------------------
/src/main/resources/nonsup-endpoint-application-k8s.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-endpoint-application-base.conf"
2 |
3 | akka {
4 |
5 | remote {
6 | artery {
7 | canonical.port = 2552
8 | }
9 | }
10 |
11 | #discovery-config
12 | discovery {
13 | kubernetes-api {
14 | # pod-namespace = "poc"
15 | # pod-port-name="akka-mgmt-http"
16 | pod-label-selector = "app=%s"
17 | }
18 | }
19 | #discovery-config
20 |
21 | #management-config
22 | management {
23 | cluster.bootstrap {
24 | contact-point-discovery {
25 | # For the kubernetes API this value is substituted into the %s in pod-label-selector
26 | service-name = ${clustering.cluster.name}
27 | port-name = "akka-mgmt-http"
28 | # pick the discovery method you'd like to use:
29 | discovery-method = kubernetes-api
30 | }
31 | }
32 | }
33 | #management-config
34 |
35 | cluster {
36 | roles=["endpoint", "k8s"]
37 | shutdown-after-unsuccessful-join-seed-nodes = 40s
38 | }
39 |
40 | coordinated-shutdown.exit-jvm = on
41 |
42 | }
43 |
44 | clustering {
45 | port = 2552
46 | defaultPort = ${clustering.port}
47 | }
48 |
49 |
--------------------------------------------------------------------------------
/src/multi-jvm/scala/akka/cluster/typed/MultiNodeTypedClusterSpec.scala:
--------------------------------------------------------------------------------
1 | package akka.cluster.typed
2 |
3 | import java.util.concurrent.ConcurrentHashMap
4 | import akka.actor.{Address, Scheduler}
5 | import akka.actor.typed.ActorSystem
6 | import akka.remote.testkit.{MultiNodeSpec, STMultiNodeSpec}
7 | import org.scalatest.Suite
8 | import akka.actor.typed.scaladsl.adapter._
9 | import akka.cluster.ClusterEvent
10 | import akka.remote.testconductor.RoleName
11 | import org.scalatest.matchers.should.Matchers
12 |
13 |
14 | import scala.language.implicitConversions
15 |
16 | trait MultiNodeTypedClusterSpec
17 | extends Suite
18 | with STMultiNodeSpec
19 | // with WatchedByCoroner
20 | with Matchers {
21 | self: MultiNodeSpec =>
22 |
23 | override def initialParticipants: Int = roles.size
24 |
25 | implicit def typedSystem: ActorSystem[Nothing] = system.toTyped
26 | implicit def scheduler: Scheduler = system.scheduler
27 |
28 | private val cachedAddresses = new ConcurrentHashMap[RoleName, Address]
29 |
30 | def cluster: Cluster = Cluster(system.toTyped)
31 |
32 | def clusterView: ClusterEvent.CurrentClusterState = cluster.state
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/src/main/resources/endpoint-application-k8s.conf:
--------------------------------------------------------------------------------
1 | include "endpoint-application-base.conf"
2 | include "telemetry-prometheus.conf"
3 |
4 | akka {
5 |
6 | remote {
7 | artery {
8 | canonical.port = 2552
9 | }
10 | }
11 |
12 | #discovery-config
13 | discovery {
14 | kubernetes-api {
15 | # pod-namespace = "poc"
16 | # pod-port-name="akka-mgmt-http"
17 | pod-label-selector = "app=%s"
18 | }
19 | }
20 | #discovery-config
21 |
22 | #management-config
23 | management {
24 | cluster.bootstrap {
25 | contact-point-discovery {
26 | # For the kubernetes API this value is substituted into the %s in pod-label-selector
27 | service-name = ${clustering.cluster.name}
28 | port-name = "akka-mgmt-http"
29 | # pick the discovery method you'd like to use:
30 | discovery-method = kubernetes-api
31 | }
32 | }
33 | }
34 | #management-config
35 |
36 | cluster {
37 | roles=["endpoint", "k8s"]
38 | shutdown-after-unsuccessful-join-seed-nodes = 40s
39 | }
40 |
41 | coordinated-shutdown.exit-jvm = on
42 |
43 | }
44 |
45 | clustering {
46 | port = 2552
47 | defaultPort = ${clustering.port}
48 | }
49 |
50 |
51 |
--------------------------------------------------------------------------------
/client/README.md:
--------------------------------------------------------------------------------
1 | # Client Examples
2 | > [!IMPORTANT]
3 | > You'll need to send a single event to Cassandra first so the needed schemas are created automatically before applying any load; otherwise, persisting events will fail.
4 | > For example,
5 |
6 | ```curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/setArtifactReadByUser```
7 |
8 | > and then verify with:
9 |
10 | ```curl 'http://localhost:8082/artifactState/getAllStates?artifactId=1&userId=Michael' | python3 -m json.tool```
11 |
12 | We’ve provided two examples that send requests to the *ArtifactStateService* gRPC endpoint:
13 |
14 |
15 | - **ArtifactStateForEach** - illustrates a traditional request / response pattern: for each request sent to the endpoint, a response is returned.
16 | - **ArtifactStateStream** - illustrates how to stream requests into the endpoint, while receiving a separate stream of responses.
17 |
18 | ## Running the examples with SBT:
19 |
20 | In a terminal enter the following command from the **client** directory:
21 |
22 | ```
23 | sbt run
24 | ```
25 | Then make your selection by entering a 1 or 2.
26 |
--------------------------------------------------------------------------------
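
Since `ArtifactState.proto` isn't reproduced in this listing, the exact generated client API isn't shown; the two call shapes, sketched with stand-in types (all names here are assumptions, not the repo's):

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source
import scala.concurrent.Future

// Stand-ins for the Akka gRPC generated message types
final case class Command(artifactId: Long, userId: String)
final case class Response(success: Boolean)

trait ArtifactStateClientSketch {
  // ArtifactStateForEach style: one request/response round trip per command
  def setArtifactRead(cmd: Command): Future[Response]

  // ArtifactStateStream style: commands stream in, responses stream back independently
  def setArtifactReadStreamed(cmds: Source[Command, NotUsed]): Source[Response, NotUsed]
}
```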
/src/main/resources/nonsup-endpoint-application-base.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | loglevel = INFO
3 |
4 | remote {
5 | artery {
6 | enabled = on
7 | transport = tcp
8 | }
9 | }
10 |
11 | actor {
12 | provider = "cluster"
13 | serialization-bindings {
14 | "com.lightbend.artifactstate.serializer.MsgSerializeMarker" = jackson-json
15 | }
16 | }
17 |
18 | cluster {
19 | roles=["endpoint"]
20 | sharding {
21 | number-of-shards = 30
22 | passivate-idle-entity-after = 2 minutes
23 | role = "sharded"
24 | }
25 | downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
26 | split-brain-resolver.active-strategy=keep-majority
27 | split-brain-resolver.keep-majority {
28 | # if the 'role' is defined the decision is based only on members with that 'role'
29 | role = "sharded"
30 | }
31 | }
32 |
33 | coordinated-shutdown.terminate-actor-system = on
34 |
35 | http {
36 | server {
37 | default-http-port = 8082
38 | preview.enable-http2 = on
39 | }
40 | }
41 | }
42 |
43 | clustering {
44 | ip = "127.0.0.1"
45 | port = 2551
46 | defaultPort = 0
47 | seed-ip = "127.0.0.1"
48 | seed-port = 2552
49 | cluster.name = ArtifactStateCluster
50 | }
51 |
52 | app {
53 | # If ask takes more time than this to complete the request is failed
54 | routes.ask-timeout = 7s
55 | }
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%date{ISO8601} %-5level %logger{36} [%X{sourceThread}] - %msg%n</pattern>
6 |         </encoder>
7 |     </appender>
8 |     <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
9 |         <appender-ref ref="STDOUT"/>
10 |     </appender>
11 |     <root level="INFO">
12 |         <appender-ref ref="ASYNC"/>
13 |     </root>
14 | </configuration>
--------------------------------------------------------------------------------
/src/main/resources/nonsup-cluster-application.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-cluster-application-base.conf"
2 |
3 | akka {
4 |
5 | remote {
6 | artery {
7 | canonical.hostname = ${clustering.ip}
8 | canonical.port = ${clustering.port}
9 | }
10 | }
11 |
12 | cluster {
13 | seed-nodes = [
14 | # "akka.tcp://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
15 | # artery protocol
16 | "akka://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
17 | ]
18 | }
19 |
20 | coordinated-shutdown.terminate-actor-system = on
21 |
22 | persistence {
23 | journal.plugin = "akka.persistence.cassandra.journal"
24 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
25 | }
26 | }
27 |
28 | clustering {
29 | ip = "127.0.0.1"
30 | port = 2552
31 | defaultPort = ${clustering.port}
32 | seed-ip = "127.0.0.1"
33 | seed-port = 2552
34 | }
35 |
36 | # NOTE: autocreation of journal and snapshot should not be used in production
37 | akka.persistence.cassandra {
38 | journal {
39 | keyspace-autocreate = true
40 | tables-autocreate = true
41 | }
42 | snapshot {
43 | keyspace-autocreate = true
44 | tables-autocreate = true
45 | }
46 | }
47 | datastax-java-driver {
48 | advanced.reconnect-on-init = true
49 | basic.contact-points = ["localhost:9042"]
50 | basic.load-balancing-policy.local-datacenter = "datacenter1"
51 | }
--------------------------------------------------------------------------------
/src/main/resources/cluster-application.conf:
--------------------------------------------------------------------------------
1 | include "cluster-application-base.conf"
2 | include "telemetry-graphite.conf"
3 |
4 | akka {
5 |
6 | remote {
7 | artery {
8 | canonical.hostname = ${clustering.ip}
9 | canonical.port = ${clustering.port}
10 | }
11 | }
12 |
13 | cluster {
14 | seed-nodes = [
15 | # "akka.tcp://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
16 | # artery protocol
17 | "akka://"${clustering.cluster.name}"@"${clustering.seed-ip}":"${clustering.seed-port}
18 | ]
19 | }
20 |
21 | coordinated-shutdown.terminate-actor-system = on
22 |
23 | persistence {
24 | journal.plugin = "akka.persistence.cassandra.journal"
25 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
26 | }
27 | }
28 |
29 | clustering {
30 | ip = "127.0.0.1"
31 | port = 2552
32 | defaultPort = ${clustering.port}
33 | seed-ip = "127.0.0.1"
34 | seed-port = 2552
35 | }
36 |
37 | # NOTE: autocreation of journal and snapshot should not be used in production
38 | akka.persistence.cassandra {
39 | journal {
40 | keyspace-autocreate = true
41 | tables-autocreate = true
42 | }
43 | snapshot {
44 | keyspace-autocreate = true
45 | tables-autocreate = true
46 | }
47 | }
48 | datastax-java-driver {
49 | advanced.reconnect-on-init = true
50 | basic.contact-points = ["localhost:9042"]
51 | basic.load-balancing-policy.local-datacenter = "datacenter1"
52 | }
--------------------------------------------------------------------------------
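
For orientation, a toy sketch of an event-sourced entity that would persist through the Cassandra journal configured above; it reuses the repo's marker traits, but it is not `ArtifactStateEntityActor` (that file isn't reproduced here):

```scala
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.scaladsl.{Effect, EventSourcedBehavior}
import com.lightbend.artifactstate.serializer.{EventSerializeMarker, MsgSerializeMarker}

object CounterEntity {
  sealed trait Command extends MsgSerializeMarker
  final case class Increment(amount: Int) extends Command

  sealed trait Event extends EventSerializeMarker
  final case class Incremented(amount: Int) extends Event

  final case class State(total: Int)

  def apply(entityId: String): EventSourcedBehavior[Command, Event, State] =
    EventSourcedBehavior(
      persistenceId = PersistenceId("Counter", entityId),
      emptyState = State(0),
      commandHandler = (_, cmd) => cmd match {
        case Increment(n) => Effect.persist(Incremented(n)) // written to the Cassandra journal
      },
      eventHandler = (state, evt) => evt match {
        case Incremented(n) => State(state.total + n)
      }
    )
}
```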
/src/main/resources/endpoint-application-base.conf:
--------------------------------------------------------------------------------
1 | include "telemetry.conf"
2 |
3 | akka {
4 | loglevel = INFO
5 |
6 | remote {
7 | artery {
8 | enabled = on
9 | transport = tcp
10 | }
11 | }
12 |
13 | actor {
14 | provider = "cluster"
15 | serialization-bindings {
16 | "com.lightbend.artifactstate.serializer.MsgSerializeMarker" = jackson-json
17 | }
18 | }
19 |
20 | cluster {
21 | roles=["endpoint"]
22 | sharding {
23 | number-of-shards = 30
24 | passivate-idle-entity-after = 2 minutes
25 | role = "sharded"
26 | }
27 | # downing-provider-class = "com.lightbend.akka.sbr.SplitBrainResolverProvider" // pre akka 2.6.6
28 | downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
29 | split-brain-resolver.active-strategy=keep-majority
30 | split-brain-resolver.keep-majority {
31 | # if the 'role' is defined the decision is based only on members with that 'role'
32 | role = "sharded"
33 | }
34 | }
35 |
36 | coordinated-shutdown.terminate-actor-system = on
37 |
38 | http {
39 | server {
40 | default-http-port = 8082
41 | preview.enable-http2 = on
42 | }
43 | }
44 | }
45 |
46 | clustering {
47 | ip = "127.0.0.1"
48 | port = 2551
49 | defaultPort = 0
50 | seed-ip = "127.0.0.1"
51 | seed-port = 2552
52 | cluster.name = ArtifactStateCluster
53 | }
54 |
55 | app {
56 | # If ask takes more time than this to complete the request is failed
57 | routes.ask-timeout = 7s
58 | }
--------------------------------------------------------------------------------
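
The `app.routes.ask-timeout` value bounds the ask between the HTTP routes and the sharded entities. A small sketch of reading it into an `akka.util.Timeout` (the repo's route code isn't reproduced here):

```scala
import akka.actor.typed.ActorSystem
import akka.util.Timeout

object AskTimeoutExample {
  // reads "app.routes.ask-timeout" (7s above) from the system's loaded config
  def askTimeout(system: ActorSystem[_]): Timeout =
    Timeout.create(system.settings.config.getDuration("app.routes.ask-timeout"))
}
```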
/src/main/scala/com/lightbend/artifactstate/actors/ClusterListenerActor.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.actors
2 |
3 | import akka.actor.typed.scaladsl.Behaviors
4 | import akka.actor.typed.Behavior
5 | import akka.cluster.ClusterEvent.{ClusterDomainEvent, MemberRemoved, MemberUp, UnreachableMember}
6 | import akka.cluster.typed.{Cluster, Subscribe}
7 |
8 | object ClusterListenerActor {
9 |
10 | def apply(): Behavior[ClusterDomainEvent] =
11 | Behaviors.setup[ClusterDomainEvent] { context =>
12 |
13 | val cluster = Cluster(context.system)
14 | cluster.subscriptions ! Subscribe(context.self, classOf[ClusterDomainEvent])
15 |
16 | context.log.info(s"started actor ${context.self.path} - (${context.self.getClass})")
17 |
18 | def running(): Behavior[ClusterDomainEvent] =
19 | Behaviors.receive { (context, message) =>
20 | message match {
21 | case MemberUp(member) =>
22 | context.log.info("Member is Up: {}", member.address)
23 | Behaviors.same
24 | case UnreachableMember(member) =>
25 | context.log.info("Member detected as unreachable: {}", member)
26 | Behaviors.same
27 | case MemberRemoved(member, previousStatus) =>
28 | context.log.info(
29 | "Member is Removed: {} after {}",
30 | member.address, previousStatus)
31 | Behaviors.same
32 | case _ =>
33 | Behaviors.same // ignore
34 | }
35 | }
36 |
37 | running()
38 | }
39 |
40 | }
--------------------------------------------------------------------------------
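
The listener only needs to be spawned once per node; its `Behaviors.setup` block subscribes it to cluster domain events. The repo wires this up in `StartNode.scala` (not reproduced here), so treat this spawning sketch as illustrative:

```scala
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import com.lightbend.artifactstate.actors.ClusterListenerActor

object ListenerExample extends App {
  val guardian = Behaviors.setup[Nothing] { context =>
    context.spawn(ClusterListenerActor(), "clusterListener")
    Behaviors.empty
  }
  ActorSystem[Nothing](guardian, "ArtifactStateCluster")
}
```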
/nonsup-docker-compose.yml:
--------------------------------------------------------------------------------
1 |
2 | services:
3 | cassandra-db:
4 | hostname: cassandra-1
5 | image: cassandra:3.11.11
6 | command: /bin/bash -c "sleep 1 && echo ' -- Pausing to let system catch up ... -->' && /docker-entrypoint.sh cassandra -f"
7 | ports:
8 | - "7000:7000"
9 | - "7001:7001"
10 | - "7199:7199"
11 | - "9042:9042"
12 | - "9160:9160"
13 | # volumes:
14 | # - ./data/cassandra-1:/var/lib/cassandra:rw
15 | networks:
16 | - statepoc
17 | healthcheck:
18 | test: ["CMD", "cqlsh", "-u", "cassandra", "-p", "cassandra", "-e", "describe keyspaces"]
19 | interval: 15s
20 | timeout: 10s
21 | retries: 10
22 |
23 | cluster:
24 | image: akka-typed-blog-distributed-state/cluster:1.2.0
25 | deploy:
26 | replicas: 3
27 | links:
28 | - cassandra-db
29 | # depends_on:
30 | # - cassandra-db
31 | environment:
32 | JAVA_OPTS: "-Dconfig.resource=nonsup-cluster-application-docker.conf"
33 | CLUSTER_IP: cluster
34 | CASSANDRA_CONTACT_POINT1: cassandra-db:9042
35 | networks:
36 | - statepoc
37 |
38 | endpoint:
39 | image: akka-typed-blog-distributed-state/cluster:1.2.0
40 | links:
41 | - cluster
42 | depends_on:
43 | - cluster
44 | environment:
45 | JAVA_OPTS: "-Dconfig.resource=nonsup-endpoint-application-docker.conf"
46 | CLUSTER_IP: endpoint
47 | ports:
48 | - "8082:8082"
49 | - "8558:8558"
50 | networks:
51 | - statepoc
52 |
53 | networks:
54 | statepoc:
55 | driver: bridge
56 |
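57 | # Bring the stack up with:
58 | #   docker-compose --compatibility -f nonsup-docker-compose.yml up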
--------------------------------------------------------------------------------
/microk8s/endpoints/endpoint-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: endpoint
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: endpoint
10 | strategy: {}
11 | template:
12 | metadata:
13 | creationTimestamp: null
14 | labels:
15 | app: endpoint
16 | tag: endpoint
17 | annotations:
18 | prometheus.io/scrape: 'true'
19 | spec:
20 | serviceAccountName: endpoints-sa
21 | containers:
22 | - env:
23 | - name: JAVA_OPTS
24 | value: "-Dconfig.resource=endpoint-application-k8s.conf"
25 | image: localhost:32000/akka-typed-blog-distributed-state/cluster:0.1.4
26 | #health
27 | readinessProbe:
28 | httpGet:
29 | path: /ready
30 | port: akka-mgmt-http
31 | initialDelaySeconds: 10
32 | periodSeconds: 5
33 | livenessProbe:
34 | httpGet:
35 | path: /alive
36 | port: akka-mgmt-http
37 | initialDelaySeconds: 90
38 | periodSeconds: 30
39 | #health
40 | name: endpoint
41 | ports:
42 | - containerPort: 8082
43 | # akka remoting
44 | - name: remoting
45 | containerPort: 2552
46 | protocol: TCP
47 | # external http
48 | - name: akka-mgmt-http
49 | containerPort: 8558
50 | protocol: TCP
51 | - name: node-metrics
52 | containerPort: 9001
53 | resources: {}
54 | restartPolicy: Always
55 | status: {}
56 |
--------------------------------------------------------------------------------
/src/main/resources/nonsup-cluster-application-docker.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-cluster-application-base.conf"
2 |
3 | akka {
4 |
5 | remote {
6 | artery {
7 | canonical.port = 2552
8 | }
9 | }
10 |
11 | #discovery-config
12 | discovery {
13 | method = akka-dns
14 | }
15 | #discovery-config
16 |
17 | #management-config
18 | management.cluster.bootstrap {
19 | contact-point-discovery {
20 | service-name = cluster
21 | contact-with-all-contact-points = false
22 | }
23 | }
24 | #management-config
25 |
26 | cluster {
27 | roles=["sharded", "dns"]
28 | shutdown-after-unsuccessful-join-seed-nodes = 40s
29 | }
30 |
31 | coordinated-shutdown.exit-jvm = on
32 |
33 | persistence {
34 | journal.plugin = "akka.persistence.cassandra.journal"
35 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
36 | }
37 | }
38 |
39 | clustering {
40 | ip = ""
41 | port = 2552
42 | defaultPort = ${clustering.port}
43 | cluster.name = ArtifactStateCluster
44 | cassandra.contactpoint1 = ${?CASSANDRA_CONTACT_POINT1}
45 | }
46 |
47 | # NOTE: autocreation of journal and snapshot should not be used in production
48 | akka.persistence.cassandra {
49 | journal {
50 | keyspace-autocreate = true
51 | tables-autocreate = true
52 | }
53 | snapshot {
54 | keyspace-autocreate = true
55 | tables-autocreate = true
56 | }
57 | }
58 | datastax-java-driver {
59 | advanced.reconnect-on-init = true
60 | basic.contact-points = [${clustering.cassandra.contactpoint1}]
61 | basic.load-balancing-policy.local-datacenter = "datacenter1"
62 | }
--------------------------------------------------------------------------------
/K8s/endpoints/endpoint-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: endpoint
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: ArtifactStateEndpoint
10 | tag: endpoint
11 | strategy: {}
12 | template:
13 | metadata:
14 | creationTimestamp: null
15 | labels:
16 | app: ArtifactStateEndpoint
17 | tag: endpoint
18 | annotations:
19 | prometheus.io/scrape: 'true'
20 | spec:
21 | serviceAccountName: endpoints-sa
22 | containers:
23 | - env:
24 | - name: JAVA_OPTS
25 | value: "-Dconfig.resource=endpoint-application-k8s.conf"
26 | image: akka-typed-blog-distributed-state/cluster:1.2.0
27 | #health
28 | readinessProbe:
29 | httpGet:
30 | path: /ready
31 | port: akka-mgmt-http
32 | initialDelaySeconds: 10
33 | periodSeconds: 5
34 | livenessProbe:
35 | httpGet:
36 | path: /alive
37 | port: akka-mgmt-http
38 | initialDelaySeconds: 90
39 | periodSeconds: 30
40 | #health
41 | name: endpoint
42 | ports:
43 | - containerPort: 8082
44 | # akka remoting
45 | - name: remoting
46 | containerPort: 2552
47 | protocol: TCP
48 | # external http
49 | - name: akka-mgmt-http
50 | containerPort: 8558
51 | protocol: TCP
52 | - name: node-metrics
53 | containerPort: 9001
54 | resources: {}
55 | restartPolicy: Always
56 | status: {}
57 |
--------------------------------------------------------------------------------
/src/main/resources/nonsup-cluster-application-docker-dns.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-cluster-application-base.conf"
2 |
3 | akka {
4 |
5 | diagnostics {
6 | recorder.enabled = off
7 | management.enabled = true
8 | }
9 |
10 | remote {
11 | artery {
12 | canonical.port = 2552
13 | }
14 | }
15 |
16 | #discovery-config
17 | discovery {
18 | method = akka-dns
19 | }
20 | #discovery-config
21 |
22 | #management-config
23 | management.cluster.bootstrap {
24 | contact-point-discovery.service-name = cluster
25 | }
26 | #management-config
27 |
28 | cluster {
29 | roles=["sharded", "dns"]
30 | shutdown-after-unsuccessful-join-seed-nodes = 40s
31 | }
32 |
33 | coordinated-shutdown.exit-jvm = on
34 |
35 | persistence {
36 |
37 | journal.plugin = "akka.persistence.r2dbc.journal"
38 | snapshot-store.plugin = "akka.persistence.r2dbc.snapshot"
39 | state.plugin = "akka.persistence.r2dbc.durable-state-store"
40 |
41 | r2dbc {
42 | dialect = "yugabyte"
43 | connection-factory {
44 | driver = "postgres"
45 | host = "localhost"
46 | host = ${?DB_HOST}
47 | port = 5433
48 | database = "yugabyte"
49 | database = ${?DB_NAME}
50 | user = "yugabyte"
51 | user = ${?DB_USER}
52 | password = "yugabyte"
53 | password = ${?DB_PASSWORD}
54 |
55 | # ssl {
56 | # enabled = on
57 | # mode = "VERIFY_CA"
58 | # root-cert = "/path/db_root.crt"
59 | # }
60 | }
61 | }
62 | }
63 | }
64 |
65 | clustering {
66 | ip = ""
67 | port = 2552
68 | defaultPort = ${clustering.port}
69 | }
70 |
--------------------------------------------------------------------------------
/K8s/nodes/node-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: node
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | app: ArtifactStateCluster
10 | template:
11 | metadata:
12 | labels:
13 | app: ArtifactStateCluster
14 | tag: clusternode
15 | annotations:
16 | prometheus.io/scrape: 'true'
17 | spec:
18 | serviceAccountName: nodes-sa
19 | containers:
20 | - name: node
21 | image: akka-typed-blog-distributed-state/cluster:1.2.0
22 | #health
23 | readinessProbe:
24 | httpGet:
25 | path: /ready
26 | port: akka-mgmt-http
27 | initialDelaySeconds: 10
28 | periodSeconds: 5
29 | livenessProbe:
30 | httpGet:
31 | path: /alive
32 | port: akka-mgmt-http
33 | initialDelaySeconds: 90
34 | periodSeconds: 30
35 | #health
36 | env:
37 | - name: HOSTNAME
38 | valueFrom:
39 | fieldRef:
40 | apiVersion: v1
41 | fieldPath: status.podIP
42 | - name: CASSANDRA_CONTACT_POINT1
43 | value: "cassandra-db:9042"
44 | - name: JAVA_OPTS
45 | value: "-Dconfig.resource=cluster-application-k8s.conf"
46 | ports:
47 | # akka remoting
48 | - name: remoting
49 | containerPort: 2552
50 | protocol: TCP
51 | # external http
52 | - name: akka-mgmt-http
53 | containerPort: 8558
54 | protocol: TCP
55 | - name: node-metrics
56 | containerPort: 9001
57 | restartPolicy: Always
58 |
59 |
--------------------------------------------------------------------------------
/microk8s/nodes/node-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: node
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | app: ArtifactStateCluster
10 | template:
11 | metadata:
12 | labels:
13 | app: ArtifactStateCluster
14 | tag: clusternode
15 | annotations:
16 | prometheus.io/scrape: 'true'
17 | spec:
18 | serviceAccountName: nodes-sa
19 | containers:
20 | - name: node
21 | image: localhost:32000/akka-typed-blog-distributed-state/cluster:0.1.4
22 | #health
23 | readinessProbe:
24 | httpGet:
25 | path: /ready
26 | port: akka-mgmt-http
27 | initialDelaySeconds: 10
28 | periodSeconds: 5
29 | livenessProbe:
30 | httpGet:
31 | path: /alive
32 | port: akka-mgmt-http
33 | initialDelaySeconds: 90
34 | periodSeconds: 30
35 | #health
36 | env:
37 | - name: HOSTNAME
38 | valueFrom:
39 | fieldRef:
40 | apiVersion: v1
41 | fieldPath: status.podIP
42 | - name: CASSANDRA_CONTACT_POINT1
43 | value: "cassandra-db:9042"
44 | - name: JAVA_OPTS
45 | value: "-Dconfig.resource=cluster-application-k8s.conf"
46 | ports:
47 | # akka remoting
48 | - name: remoting
49 | containerPort: 2552
50 | protocol: TCP
51 | # external http
52 | - name: akka-mgmt-http
53 | containerPort: 8558
54 | protocol: TCP
55 | - name: node-metrics
56 | containerPort: 9001
57 | restartPolicy: Always
58 |
59 |
--------------------------------------------------------------------------------
/src/main/resources/cluster-application-docker.conf:
--------------------------------------------------------------------------------
1 | include "cluster-application-base.conf"
2 | include "telemetry-graphite.conf"
3 |
4 | akka {
5 | diagnostics {
6 | recorder.enabled = off
7 | management.enabled = true
8 | }
9 |
10 | remote {
11 | artery {
12 | canonical.port = 2552
13 | }
14 | }
15 |
16 | #discovery-config
17 | discovery {
18 | method = akka-dns
19 | }
20 | #discovery-config
21 |
22 | #management-config
23 | management.cluster.bootstrap {
24 | contact-point-discovery {
25 | service-name = cluster
26 | contact-with-all-contact-points = false
27 | }
28 | }
29 | #management-config
30 |
31 | cluster {
32 | roles=["sharded", "dns"]
33 | shutdown-after-unsuccessful-join-seed-nodes = 40s
34 | }
35 |
36 | coordinated-shutdown.exit-jvm = on
37 |
38 | persistence {
39 | journal.plugin = "akka.persistence.cassandra.journal"
40 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
41 | }
42 |
43 | }
44 |
45 | clustering {
46 | ip = ""
47 | port = 2552
48 | defaultPort = ${clustering.port}
49 | cluster.name = ArtifactStateCluster
50 | cassandra.contactpoint1 = ${?CASSANDRA_CONTACT_POINT1}
51 | }
52 |
53 | # NOTE: autocreation of journal and snapshot should not be used in production
54 | akka.persistence.cassandra {
55 | journal {
56 | keyspace-autocreate = true
57 | tables-autocreate = true
58 | }
59 | snapshot {
60 | keyspace-autocreate = true
61 | tables-autocreate = true
62 | }
63 | }
64 | datastax-java-driver {
65 | advanced.reconnect-on-init = true
66 | basic.contact-points = [${clustering.cassandra.contactpoint1}]
67 | basic.load-balancing-policy.local-datacenter = "datacenter1"
68 | }
--------------------------------------------------------------------------------
/src/main/resources/cluster-application-docker-dns.conf:
--------------------------------------------------------------------------------
1 | include "cluster-application-base.conf"
2 | include "telemetry-graphite.conf"
3 |
4 | akka {
5 |
6 | diagnostics {
7 | recorder.enabled = off
8 | management.enabled = true
9 | }
10 |
11 | remote {
12 | artery {
13 | canonical.port = 2552
14 | }
15 | }
16 |
17 | #discovery-config
18 | discovery {
19 | method = akka-dns
20 | }
21 | #discovery-config
22 |
23 | #management-config
24 | management.cluster.bootstrap {
25 | contact-point-discovery {
26 | service-name = cluster
27 | contact-with-all-contact-points = false
28 | }
29 | }
30 | #management-config
31 |
32 | cluster {
33 | roles=["sharded", "dns"]
34 | shutdown-after-unsuccessful-join-seed-nodes = 40s
35 | }
36 |
37 | coordinated-shutdown.exit-jvm = on
38 |
39 | persistence {
40 |
41 | journal.plugin = "akka.persistence.r2dbc.journal"
42 | snapshot-store.plugin = "akka.persistence.r2dbc.snapshot"
43 | state.plugin = "akka.persistence.r2dbc.durable-state-store"
44 |
45 | r2dbc {
46 | dialect = "yugabyte"
47 | connection-factory {
48 | driver = "postgres"
49 | host = "localhost"
50 | host = ${?DB_HOST}
51 | port = 5433
52 | database = "yugabyte"
53 | database = ${?DB_NAME}
54 | user = "yugabyte"
55 | user = ${?DB_USER}
56 | password = "yugabyte"
57 | password = ${?DB_PASSWORD}
58 |
59 | # ssl {
60 | # enabled = on
61 | # mode = "VERIFY_CA"
62 | # root-cert = "/path/db_root.crt"
63 | # }
64 | }
65 | }
66 | }
67 | }
68 |
69 | clustering {
70 | ip = ""
71 | port = 2552
72 | defaultPort = ${clustering.port}
73 | }
74 |
--------------------------------------------------------------------------------
/src/main/protobuf/ArtifactState.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | option java_package = "com.lightbend.artifactstate.endpoint";
4 | option java_outer_classname = "ArtifactStateProto";
5 |
6 | // The ArtifactStateService service definition.
7 | service ArtifactStateService {
8 | // queries
9 | rpc IsArtifactReadByUser (ArtifactAndUser) returns (ExtResponse) {}
10 |
11 | rpc IsArtifactInUserFeed (ArtifactAndUser) returns (ExtResponse) {}
12 |
13 | rpc GetAllStates (ArtifactAndUser) returns (AllStatesResponse) {}
14 |
15 | // commands
16 | rpc SetArtifactReadByUser (ArtifactAndUser) returns (CommandResponse) {}
17 | rpc SetArtifactAddedToUserFeed (ArtifactAndUser) returns (CommandResponse) {}
18 | rpc SetArtifactRemovedFromUserFeed (ArtifactAndUser) returns (CommandResponse) {}
19 |
20 | rpc CommandsStreamed (stream ArtifactCommand) returns (stream StreamedResponse) {}
21 | }
22 |
23 | message ArtifactAndUser {
24 | uint64 artifactId = 1;
25 | string userId = 2;
26 | }
27 |
28 | message ExtResponse {
29 | uint64 artifactId = 1;
30 | string userId = 2;
31 | bool answer = 3;
32 | string failureMsg = 4;
33 | }
34 |
35 | message AllStatesResponse {
36 | uint64 artifactId = 1;
37 | string userId = 2;
38 | bool artifactRead = 3;
39 | bool artifactInUserFeed = 4;
40 | string failureMsg = 5;
41 | }
42 |
43 | message CommandResponse {
44 | bool success = 1;
45 | }
46 |
47 | message ArtifactCommand {
48 | uint64 artifactId = 1;
49 | string userId = 2;
50 | string command = 3; // SetArtifactReadByUser, SetArtifactAddedToUserFeed, SetArtifactRemovedFromUserFeed
51 | }
52 |
53 | message StreamedResponse {
54 | bool success = 1;
55 | string failureMsg = 2;
56 | ArtifactCommand command = 3;
57 | }
58 |
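59 | // Example call against a locally running endpoint (hypothetical invocation; needs
60 | // gRPC server reflection enabled, or pass this file via grpcurl's -proto flag):
61 | //   grpcurl -plaintext -d '{"artifactId": 1, "userId": "Smith"}' \
62 | //     localhost:8082 ArtifactStateService/GetAllStates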
--------------------------------------------------------------------------------
/src/main/resources/nonsup-cluster-application-k8s.conf:
--------------------------------------------------------------------------------
1 | include "nonsup-cluster-application-base.conf"
2 |
3 | akka {
4 |
5 | remote {
6 | artery {
7 | canonical.port = 2552
8 | }
9 | }
10 |
11 | #discovery-config
12 | discovery {
13 | kubernetes-api {
14 | pod-label-selector = "app=%s"
15 | }
16 | }
17 | #discovery-config
18 |
19 | #management-config
20 | management {
21 | cluster.bootstrap {
22 | contact-point-discovery {
23 | # For the kubernetes API this value is substituted into the %s in pod-label-selector
24 | service-name = ${clustering.cluster.name}
25 | port-name = "akka-mgmt-http"
26 | # pick the discovery method you'd like to use:
27 | discovery-method = kubernetes-api
28 | }
29 | }
30 | }
31 | #management-config
32 |
33 | cluster {
34 | roles=["sharded", "k8s"]
35 | shutdown-after-unsuccessful-join-seed-nodes = 40s
36 | }
37 |
38 | coordinated-shutdown.exit-jvm = on
39 |
40 | persistence {
41 | journal.plugin = "akka.persistence.cassandra.journal"
42 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
43 | }
44 |
45 | }
46 |
47 | clustering {
48 | cassandra.contactpoint1 = ${?CASSANDRA_CONTACT_POINT1}
49 | ip = ""
50 | port = 2552
51 | defaultPort = ${clustering.port}
52 | }
53 |
54 | # NOTE: autocreation of journal and snapshot should not be used in production
55 | akka.persistence.cassandra {
56 | journal {
57 | keyspace-autocreate = true
58 | tables-autocreate = true
59 | }
60 | snapshot {
61 | keyspace-autocreate = true
62 | tables-autocreate = true
63 | }
64 | }
65 | datastax-java-driver {
66 | advanced.reconnect-on-init = true
67 | basic.contact-points = [${clustering.cassandra.contactpoint1}]
68 | basic.load-balancing-policy.local-datacenter = "datacenter1"
69 | }
70 |
--------------------------------------------------------------------------------
/client/src/main/protobuf/ArtifactState.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | option java_package = "com.lightbend.artifactstate.endpoint";
4 | option java_outer_classname = "ArtifactStateProto";
5 |
6 | // The ArtifactStateService service definition.
7 | service ArtifactStateService {
8 | // queries
9 | rpc IsArtifactReadByUser (ArtifactAndUser) returns (ExtResponse) {}
10 |
11 | rpc IsArtifactInUserFeed (ArtifactAndUser) returns (ExtResponse) {}
12 |
13 | rpc GetAllStates (ArtifactAndUser) returns (AllStatesResponse) {}
14 |
15 | // commands
16 | rpc SetArtifactReadByUser (ArtifactAndUser) returns (CommandResponse) {}
17 | rpc SetArtifactAddedToUserFeed (ArtifactAndUser) returns (CommandResponse) {}
18 | rpc SetArtifactRemovedFromUserFeed (ArtifactAndUser) returns (CommandResponse) {}
19 |
20 | rpc CommandsStreamed (stream ArtifactCommand) returns (stream StreamedResponse) {}
21 | }
22 |
23 | message ArtifactAndUser {
24 | uint64 artifactId = 1;
25 | string userId = 2;
26 | }
27 |
28 | message ExtResponse {
29 | uint64 artifactId = 1;
30 | string userId = 2;
31 | bool answer = 3;
32 | string failureMsg = 4;
33 | }
34 |
35 | message AllStatesResponse {
36 | uint64 artifactId = 1;
37 | string userId = 2;
38 | bool artifactRead = 3;
39 | bool artifactInUserFeed = 4;
40 | string failureMsg = 5;
41 | }
42 |
43 | message CommandResponse {
44 | bool success = 1;
45 | }
46 |
47 | message ArtifactCommand {
48 | uint64 artifactId = 1;
49 | string userId = 2;
50 | string command = 3; // SetArtifactReadByUser, SetArtifactAddedToUserFeed, SetArtifactRemovedFromUserFeed
51 | }
52 |
53 | message StreamedResponse {
54 | bool success = 1;
55 | string failureMsg = 2;
56 | ArtifactCommand command = 3;
57 | }
58 |
--------------------------------------------------------------------------------
/src/main/resources/cluster-application-k8s.conf:
--------------------------------------------------------------------------------
1 | include "cluster-application-base.conf"
2 | include "telemetry-prometheus.conf"
3 |
4 | akka {
5 |
6 | remote {
7 | artery {
8 | canonical.port = 2552
9 | }
10 | }
11 |
12 | #discovery-config
13 | discovery {
14 | kubernetes-api {
15 | pod-label-selector = "app=%s"
16 | }
17 | }
18 | #discovery-config
19 |
20 | #management-config
21 | management {
22 | cluster.bootstrap {
23 | contact-point-discovery {
24 | # For the kubernetes API this value is substituted into the %s in pod-label-selector
25 | service-name = ${clustering.cluster.name}
26 | port-name = "akka-mgmt-http"
27 | # pick the discovery method you'd like to use:
28 | discovery-method = kubernetes-api
29 | }
30 | }
31 | }
32 | #management-config
33 |
34 | cluster {
35 | roles=["sharded", "k8s"]
36 | shutdown-after-unsuccessful-join-seed-nodes = 40s
37 | }
38 |
39 | coordinated-shutdown.exit-jvm = on
40 |
41 | persistence {
42 | journal.plugin = "akka.persistence.cassandra.journal"
43 | snapshot-store.plugin = "akka.persistence.cassandra.snapshot"
44 | }
45 |
46 | }
47 |
48 | clustering {
49 | cassandra.contactpoint1 = ${?CASSANDRA_CONTACT_POINT1}
50 | ip = ""
51 | port = 2552
52 | defaultPort = ${clustering.port}
53 | }
54 |
55 | # NOTE: autocreation of journal and snapshot should not be used in production
56 | akka.persistence.cassandra {
57 | journal {
58 | keyspace-autocreate = true
59 | tables-autocreate = true
60 | }
61 | snapshot {
62 | keyspace-autocreate = true
63 | tables-autocreate = true
64 | }
65 | }
66 | datastax-java-driver {
67 | advanced.reconnect-on-init = true
68 | basic.contact-points = [${clustering.cassandra.contactpoint1}]
69 | basic.load-balancing-policy.local-datacenter = "datacenter1"
70 | }
71 |
--------------------------------------------------------------------------------
/DOCKER_DNS_YUGABYTE.md:
--------------------------------------------------------------------------------
1 | # Akka Cluster Bootstrap on Docker with DNS and Yugabyte
2 |
3 | ## Download and Run the Elasticsearch Telemetry Sandbox
4 | 1. First download the Elasticsearch developer sandbox and unzip the sandbox scripts. You can do this in a terminal with:
5 | ```
6 | curl -O https://downloads.lightbend.com/cinnamon/sandbox/cinnamon-elasticsearch-docker-sandbox-2.17.0.zip
7 | unzip cinnamon-elasticsearch-docker-sandbox-2.17.0.zip
8 | ```
9 | 2. Switch into the `cinnamon-elasticsearch-docker-sandbox-2.17.0` directory in your terminal.
10 | ```
11 | cd cinnamon-elasticsearch-docker-sandbox-2.17.0
12 | ```
13 | 3. Start the Sandbox on Linux:
14 | ```
15 | docker-compose -f docker-compose.yml up
16 | ```
17 |
18 | ## Start the Docker cluster from a terminal window
19 | Note: I'm finding that cluster formation on Docker isn't 100% reliable. I'm continuing to look into the problem.
20 | ### Build the image and publish to your local docker with sbt
21 |
22 | Start `sbt`, then issue `docker:publishLocal`:
23 | ```
24 | sbt
25 | sbt:akka-typed-distributed-state-blog> docker:publishLocal
26 | ```
27 |
28 | ### Watch it happen
29 | ```
30 | docker-compose --compatibility -f docker-compose-dns.yml up
31 | ```
32 |
33 | ### Start it in the background
34 | ```
35 | docker-compose --compatibility -f docker-compose-dns.yml up -d
36 | ```
37 |
38 | ## Create the required tables in Yugabyte DB
39 | 1. Connect to Yugabyte from another terminal window.
40 | ```
41 | docker exec -it yb-tserver-n1 /home/yugabyte/bin/ysqlsh -h yb-tserver-n1
42 | ```
43 | 2. Follow Creating the Schema [here](https://doc.akka.io/docs/akka-persistence-r2dbc/current/getting-started.html#creating-the-schema).
44 |
45 | ## Test it
46 | The API endpoint is exposed on localhost:8082, and Akka Management on localhost:8558.
47 |
48 | Discussion of the API can be found in [Part 1](https://www.lightbend.com/blog/how-to-distribute-application-state-with-akka-cluster-part-1-getting-started) under "Running the PoC locally".
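49 |
50 | For a quick smoke test, the REST routes exercised by the Gatling scenario in this repo can also be hit directly (illustrative values):
51 | ```
52 | curl -X POST -H "Content-Type: application/json" \
53 |   -d '{"artifactId": 1, "userId": "Smith"}' \
54 |   http://localhost:8082/artifactState/setArtifactReadByUser
55 | curl -X POST -H "Content-Type: application/json" \
56 |   -d '{"artifactId": 1, "userId": "Smith"}' \
57 |   http://localhost:8082/artifactState/getAllStates
58 | ```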
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 |
2 | services:
3 | cassandra-db:
4 | hostname: cassandra-1
5 | image: cassandra:3.11.11
6 | command: /bin/bash -c "sleep 1 && echo ' -- Pausing to let system catch up ... -->' && /docker-entrypoint.sh cassandra -f"
7 | ports:
8 | - "7000:7000"
9 | - "7001:7001"
10 | - "7199:7199"
11 | - "9042:9042"
12 | - "9160:9160"
13 | # volumes:
14 | # - ./data/cassandra-1:/var/lib/cassandra:rw
15 | networks:
16 | - statepoc
17 | healthcheck:
18 | test: ["CMD", "cqlsh", "-u", "cassandra", "-p", "cassandra", "-e", "describe keyspaces"]
19 | interval: 15s
20 | timeout: 10s
21 | retries: 10
22 |
23 | cluster:
24 | image: akka-typed-blog-distributed-state/cluster:1.2.0
25 | deploy:
26 | replicas: 3
27 | ports:
28 | - 8558-8567:8558 # akka-mgmt
29 | links:
30 | - cassandra-db
31 | # depends_on:
32 | # - cassandra-db
33 | environment:
34 | JAVA_OPTS: "-Dconfig.resource=cluster-application-docker.conf"
35 | CLUSTER_IP: cluster
36 | CASSANDRA_CONTACT_POINT1: cassandra-db:9042
37 | GRAPHITE-SANDBOX: cinnamon-graphite-docker-sandbox-2203-graphite-1
38 | networks:
39 | - cinnamon-graphite-docker-sandbox-2203_default
40 | - statepoc
41 |
42 | endpoint:
43 | image: akka-typed-blog-distributed-state/cluster:1.2.0
44 | links:
45 | - cluster
46 | depends_on:
47 | - cluster
48 | environment:
49 | JAVA_OPTS: "-Dconfig.resource=endpoint-application-docker.conf"
50 | CLUSTER_IP: endpoint
51 | GRAPHITE-SANDBOX: cinnamon-graphite-docker-sandbox-2203-graphite-1
52 | ports:
53 | - "8082:8082"
54 | - "8558:8558"
55 | networks:
56 | - cinnamon-graphite-docker-sandbox-2203_default
57 | - statepoc
58 |
59 | networks:
60 | #Note: this network name must match the version of the sandbox
61 | cinnamon-graphite-docker-sandbox-2203_default:
62 | external: true
63 |
64 | statepoc:
65 | driver: bridge
66 |
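67 | # Bring the stack up with:
68 | #   docker-compose --compatibility up
69 | # (--compatibility is what honors deploy.replicas with docker-compose)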
--------------------------------------------------------------------------------
/src/main/resources/telemetry.conf:
--------------------------------------------------------------------------------
1 | cinnamon {
2 | application = "akka-typed-blog-distributed-state"
3 |
4 | }
5 |
6 | cinnamon.akka.actors {
7 | "/user/*" {
8 | report-by = group
9 | }
10 | "sharded-group" {
11 | report-by = group
12 | includes = ["/system/sharding/ArtifactState/*"]
13 | # excludes = ["akka.cluster.sharding.Shard"]
14 | }
15 | }
16 |
17 | cinnamon.akka.http.servers {
18 | "*:*" {
19 | paths {
20 | "*" {
21 | metrics = on
22 | }
23 | }
24 | }
25 | }
26 |
27 | cinnamon.akka.dispatchers {
28 | basic-information {
29 | names = ["*"]
30 | }
31 | time-information {
32 | names = ["*"]
33 | }
34 | }
35 |
36 | cinnamon.akka.cluster {
37 | node-metrics = on
38 | shard-region-info = on
39 | domain-events = on
40 | member-events = on
41 | singleton-events = on
42 | split-brain-resolver-events = on
43 | }
44 |
45 | cinnamon.stopwatch {
46 | enabled = true
47 | }
48 | cinnamon.akka.stream.metrics {
49 | async-processing-time = on
50 | }
51 |
52 | cinnamon.akka {
53 | streams {
54 | "name:my-stream" {
55 | report-by = name
56 | flows = on
57 | }
58 | partial = on
59 | metrics {
60 | async-processing-time = on
61 | demand = on
62 | latency = on
63 | }
64 | }
65 | }
66 |
67 | cinnamon.jmx-importer {
68 | beans = [
69 | {
70 | query = "java.lang:type=GarbageCollector,name=*"
71 | attributes = [
72 | {
73 | attribute = "CollectionCount",
74 | metric-type = "GAUGE_LONG"
75 | },
76 | {
77 | attribute = "CollectionTime",
78 | metric-type = "GAUGE_LONG"
79 | }
80 | ]
81 | },
82 | {
83 | query = "java.lang:type=OperatingSystem"
84 | attributes = [
85 | {
86 | attribute = "SystemCpuLoad",
87 | metric-type = "GAUGE_DOUBLE"
88 | }
89 | ]
90 | },
91 | {
92 | query = "ArtifactStateCluster-0-metrics:type=open-connections,name=*"
93 | attributes = [
94 | {
95 | attribute = "Value",
96 | metric-type = "GAUGE_LONG"
97 | }
98 | ]
99 | }
100 | ]
101 | }
102 |
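103 | # This file is pulled in by the application configs via include "telemetry.conf"
104 | # (see e.g. endpoint-application-base.conf).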
--------------------------------------------------------------------------------
/ddl-scripts/create_tables_postgres.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS event_journal(
2 | slice INT NOT NULL,
3 | entity_type VARCHAR(255) NOT NULL,
4 | persistence_id VARCHAR(255) NOT NULL,
5 | seq_nr BIGINT NOT NULL,
6 | db_timestamp timestamp with time zone NOT NULL,
7 |
8 | event_ser_id INTEGER NOT NULL,
9 | event_ser_manifest VARCHAR(255) NOT NULL,
10 | event_payload BYTEA NOT NULL,
11 |
12 | deleted BOOLEAN DEFAULT FALSE NOT NULL,
13 | writer VARCHAR(255) NOT NULL,
14 | adapter_manifest VARCHAR(255),
15 | tags TEXT ARRAY,
16 |
17 | meta_ser_id INTEGER,
18 | meta_ser_manifest VARCHAR(255),
19 | meta_payload BYTEA,
20 |
21 | PRIMARY KEY(persistence_id, seq_nr)
22 | );
23 |
24 | -- `event_journal_slice_idx` is only needed if the slice based queries are used
25 | CREATE INDEX IF NOT EXISTS event_journal_slice_idx ON event_journal(slice, entity_type, db_timestamp, seq_nr);
26 |
27 | CREATE TABLE IF NOT EXISTS snapshot(
28 | slice INT NOT NULL,
29 | entity_type VARCHAR(255) NOT NULL,
30 | persistence_id VARCHAR(255) NOT NULL,
31 | seq_nr BIGINT NOT NULL,
32 | db_timestamp timestamp with time zone,
33 | write_timestamp BIGINT NOT NULL,
34 | ser_id INTEGER NOT NULL,
35 | ser_manifest VARCHAR(255) NOT NULL,
36 | snapshot BYTEA NOT NULL,
37 | tags TEXT ARRAY,
38 | meta_ser_id INTEGER,
39 | meta_ser_manifest VARCHAR(255),
40 | meta_payload BYTEA,
41 |
42 | PRIMARY KEY(persistence_id)
43 | );
44 |
45 | -- `snapshot_slice_idx` is only needed if the slice based queries are used together with snapshot as starting point
46 | CREATE INDEX IF NOT EXISTS snapshot_slice_idx ON snapshot(slice, entity_type, db_timestamp);
47 |
48 | CREATE TABLE IF NOT EXISTS durable_state (
49 | slice INT NOT NULL,
50 | entity_type VARCHAR(255) NOT NULL,
51 | persistence_id VARCHAR(255) NOT NULL,
52 | revision BIGINT NOT NULL,
53 | db_timestamp timestamp with time zone NOT NULL,
54 |
55 | state_ser_id INTEGER NOT NULL,
56 | state_ser_manifest VARCHAR(255),
57 | state_payload BYTEA NOT NULL,
58 | tags TEXT ARRAY,
59 |
60 | PRIMARY KEY(persistence_id, revision)
61 | );
62 |
63 | -- `durable_state_slice_idx` is only needed if the slice based queries are used
64 | CREATE INDEX IF NOT EXISTS durable_state_slice_idx ON durable_state(slice, entity_type, db_timestamp, revision);
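65 |
66 | -- With the Yugabyte compose setup in this repo, these statements can be applied
67 | -- from ysqlsh, e.g.:
68 | --   docker exec -i yb-tserver-n1 /home/yugabyte/bin/ysqlsh -h yb-tserver-n1 < ddl-scripts/create_tables_postgres.sql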
--------------------------------------------------------------------------------
/OpenShift-4.1/node-deployment-no-namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: node-ii
6 | spec:
7 | replicas: 2
8 | # selector:
9 | # matchLabels:
10 | # app: ArtifactStateCluster
11 | template:
12 | metadata:
13 | labels:
14 | app: ArtifactStateCluster
15 | tag: clusternode
16 | annotations:
17 | prometheus.io/scrape: 'true'
18 | spec:
19 | containers:
20 | - name: node-ii
21 | image: image-registry.openshift-image-registry.svc:5000/shopping-cart/cluster:latest
22 | imagePullPolicy: IfNotPresent
23 | #health
24 | livenessProbe:
25 | httpGet:
26 | path: /alive
27 | port: akka-mgmt-http
28 | readinessProbe:
29 | httpGet:
30 | path: /ready
31 | port: akka-mgmt-http
32 | #health
33 | env:
34 | - name: HOSTNAME
35 | valueFrom:
36 | fieldRef:
37 | apiVersion: v1
38 | fieldPath: status.podIP
39 | - name: CASSANDRA_CONTACT_POINT1
40 | value: cassandra-peers
41 | - name: JAVA_OPTS
42 | value: "-Dconfig.resource=cluster-application-k8s.conf"
43 | ports:
44 | # akka remoting
45 | - name: remoting
46 | containerPort: 2552
47 | protocol: TCP
48 | # external http
49 | - name: akka-mgmt-http
50 | containerPort: 8558
51 | protocol: TCP
52 | - name: node-ii-metrics
53 | containerPort: 9001
54 |
55 | restartPolicy: Always
56 |
57 | ---
58 | apiVersion: v1
59 | kind: Service
60 | metadata:
61 | name: node-ii
62 | spec:
63 | type: NodePort
64 | ports:
65 | - name: akka-mgmt-http
66 | protocol: TCP
67 | port: 8558
68 | targetPort: akka-mgmt-http
69 | nodePort: 30558
70 | selector:
71 | tag: clusternode
72 |
73 | ---
74 | kind: Role
75 | apiVersion: rbac.authorization.k8s.io/v1
76 | metadata:
77 | name: node-ii-reader
78 |
79 | rules:
80 | - apiGroups: [""] # "" indicates the core API group
81 | resources: ["pods"]
82 | verbs: ["get", "watch", "list"]
83 |
84 | ---
85 | kind: RoleBinding
86 | apiVersion: rbac.authorization.k8s.io/v1
87 | metadata:
88 | name: read-nodes
89 | subjects:
90 | # Note the `name` line below. The first default refers to the namespace. The second refers to the service account name.
91 | # For instance, `name: system:serviceaccount:myns:default` would refer to the default service account in namespace `myns`
92 | - kind: User
93 | name: system:serviceaccount:poc:default
94 | roleRef:
95 | kind: Role
96 | name: node-ii-reader
97 | apiGroup: rbac.authorization.k8s.io
98 |
--------------------------------------------------------------------------------
/client/src/main/scala/client/artifactstate/ArtifactStateStream.scala:
--------------------------------------------------------------------------------
1 | package client.artifactstate
2 |
3 | import akka.{Done, NotUsed}
4 | import akka.actor.typed.ActorSystem
5 | import akka.actor.typed.scaladsl.Behaviors
6 | import akka.grpc.GrpcClientSettings
7 | import akka.stream.scaladsl.Source
8 | import com.lightbend.artifactstate.endpoint.{ArtifactCommand, ArtifactStateServiceClient, StreamedResponse}
9 |
10 | import scala.collection.mutable.ListBuffer
11 | import scala.concurrent.{ExecutionContext, Future}
12 | import scala.util.{Failure, Random, Success}
13 |
14 | // tag::clientStream[]
15 | object ArtifactStateStream extends App {
16 |
17 | implicit val sys: ActorSystem[_] = ActorSystem(Behaviors.empty, "ArtifactStateClient")
18 | implicit val ec: ExecutionContext = sys.executionContext
19 |
20 | val client = ArtifactStateServiceClient(GrpcClientSettings.fromConfig("client.ArtifactStateService"))
21 |
22 | var lastnames = ListBuffer.empty[String]
23 | for (line <- scala.io.Source.fromFile("./test-data/lastnames.csv").getLines()) {
24 | lastnames += line.replaceAll("[\t\n]", "")
25 | }
26 |
27 | streamArtifactState()
28 |
29 | def streamArtifactState(): Unit = {
30 |
31 | val r = new Random()
32 |
33 | val commands = List("SetArtifactReadByUser", "SetArtifactAddedToUserFeed", "SetArtifactRemovedFromUserFeed", "XYZZY") // "XYZZY" is not a real command; presumably included to exercise the failure path
34 |
35 | val requestStream: Source[ArtifactCommand, NotUsed] = Source(1 to 2000).map { i =>
36 | val commandSelect = r.between(0, 4)
37 | val command = commands(commandSelect)
38 | val artifactId = r.between(0, 101)
39 | val userId = lastnames(r.between(1, 1001))
40 | println(s"transmitting data for artifactId ${artifactId} userId ${userId} ($i) command: ${command}")
41 | ArtifactCommand(artifactId, userId, command)
42 | }
43 |
44 | val responseStream: Source[StreamedResponse, NotUsed] = client.commandsStreamed(requestStream)
45 | var cnt = 0
46 | val done: Future[Done] =
47 | responseStream.runForeach{ reply =>
48 | cnt = cnt + 1
49 | reply.success match {
50 | case true =>
51 | println(s"streaming reply received ($cnt): ${reply.success}")
52 | case false =>
53 | println(s"streaming reply received ($cnt) a failure : ${reply.failureMsg} on ${reply.command}")
54 | }
55 | }
56 |
57 | done.onComplete {
58 | case Success(_) =>
59 | println("streamingBroadcast done")
60 | System.exit(0)
61 | case Failure(e) =>
62 | println(s"Error streamingBroadcast: $e")
63 | System.exit(0)
64 | }
65 | }
66 |
67 | }
68 | // end::clientStream[]
--------------------------------------------------------------------------------
/OpenShift-3.x/node-deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: node
6 | namespace: poc
7 | spec:
8 | replicas: 2
9 | # selector:
10 | # matchLabels:
11 | # app: ArtifactStateCluster
12 | template:
13 | metadata:
14 | labels:
15 | app: ArtifactStateCluster
16 | tag: clusternode
17 | annotations:
18 | prometheus.io/scrape: 'true'
19 | spec:
20 | containers:
21 | - name: node
22 | image: akka-typed-blog-distributed-state/cluster:0.1.2
23 | imagePullPolicy: IfNotPresent
24 | #health
25 | readinessProbe:
26 | httpGet:
27 | path: /ready
28 | port: akka-mgmt-http
29 | initialDelaySeconds: 10
30 | periodSeconds: 5
31 | livenessProbe:
32 | httpGet:
33 | path: /alive
34 | port: akka-mgmt-http
35 | initialDelaySeconds: 90
36 | periodSeconds: 30
37 | #health
38 | env:
39 | - name: HOSTNAME
40 | valueFrom:
41 | fieldRef:
42 | apiVersion: v1
43 | fieldPath: status.podIP
44 | - name: CASSANDRA_CONTACT_POINT1
45 | value: cassandra-peers
46 | - name: JAVA_OPTS
47 | value: "-Dconfig.resource=cluster-application-k8s.conf"
48 | ports:
49 | # akka remoting
50 | - name: remoting
51 | containerPort: 2552
52 | protocol: TCP
53 | # external http
54 | - name: akka-mgmt-http
55 | containerPort: 8558
56 | protocol: TCP
57 | - name: node-metrics
58 | containerPort: 9001
59 |
60 | restartPolicy: Always
61 |
62 | ---
63 | apiVersion: v1
64 | kind: Service
65 | metadata:
66 | name: node
67 | namespace: poc
68 | spec:
69 | type: NodePort
70 | ports:
71 | - name: akka-mgmt-http
72 | protocol: TCP
73 | port: 8558
74 | targetPort: akka-mgmt-http
75 | nodePort: 30558
76 | selector:
77 | tag: clusternode
78 |
79 | ---
80 | kind: Role
81 | apiVersion: rbac.authorization.k8s.io/v1
82 | metadata:
83 | name: node-reader
84 | namespace: poc
85 |
86 | rules:
87 | - apiGroups: [""] # "" indicates the core API group
88 | resources: ["pods"]
89 | verbs: ["get", "watch", "list"]
90 |
91 | ---
92 | kind: RoleBinding
93 | apiVersion: rbac.authorization.k8s.io/v1
94 | metadata:
95 | name: read-nodes
96 | namespace: poc
97 | subjects:
98 | # Note the `name` line below. The first default refers to the namespace. The second refers to the service account name.
99 | # For instance, `name: system:serviceaccount:myns:default` would refer to the default service account in namespace `myns`
100 | - kind: User
101 | name: system:serviceaccount:poc:default
102 | roleRef:
103 | kind: Role
104 | name: node-reader
105 | apiGroup: rbac.authorization.k8s.io
106 |
--------------------------------------------------------------------------------
/OpenShift-4.1/node-deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: node
6 | namespace: poc
7 | spec:
8 | replicas: 2
9 | # selector:
10 | # matchLabels:
11 | # app: ArtifactStateCluster
12 | template:
13 | metadata:
14 | labels:
15 | app: ArtifactStateCluster
16 | tag: clusternode
17 | annotations:
18 | prometheus.io/scrape: 'true'
19 | spec:
20 | containers:
21 | - name: node
22 | image: image-registry.openshift-image-registry.svc:5000/poc/cluster:latest
23 | imagePullPolicy: IfNotPresent
24 | #health
25 | readinessProbe:
26 | httpGet:
27 | path: /ready
28 | port: akka-mgmt-http
29 | initialDelaySeconds: 10
30 | periodSeconds: 5
31 | livenessProbe:
32 | httpGet:
33 | path: /alive
34 | port: akka-mgmt-http
35 | initialDelaySeconds: 90
36 | periodSeconds: 30
37 | #health
38 | env:
39 | - name: HOSTNAME
40 | valueFrom:
41 | fieldRef:
42 | apiVersion: v1
43 | fieldPath: status.podIP
44 | - name: CASSANDRA_CONTACT_POINT1
45 | value: cassandra-peers
46 | - name: JAVA_OPTS
47 | value: "-Dconfig.resource=cluster-application-k8s.conf"
48 | ports:
49 | # akka remoting
50 | - name: remoting
51 | containerPort: 2552
52 | protocol: TCP
53 | # external http
54 | - name: akka-mgmt-http
55 | containerPort: 8558
56 | protocol: TCP
57 | - name: node-metrics
58 | containerPort: 9001
59 |
60 | restartPolicy: Always
61 |
62 | ---
63 | apiVersion: v1
64 | kind: Service
65 | metadata:
66 | name: node
67 | namespace: poc
68 | spec:
69 | type: NodePort
70 | ports:
71 | - name: akka-mgmt-http
72 | protocol: TCP
73 | port: 8558
74 | targetPort: akka-mgmt-http
75 | nodePort: 30558
76 | selector:
77 | tag: clusternode
78 |
79 | ---
80 | kind: Role
81 | apiVersion: rbac.authorization.k8s.io/v1
82 | metadata:
83 | name: node-reader
84 | namespace: poc
85 |
86 | rules:
87 | - apiGroups: [""] # "" indicates the core API group
88 | resources: ["pods"]
89 | verbs: ["get", "watch", "list"]
90 |
91 | ---
92 | kind: RoleBinding
93 | apiVersion: rbac.authorization.k8s.io/v1
94 | metadata:
95 | name: read-nodes
96 | namespace: poc
97 | subjects:
98 | # Note the `name` line below. The first default refers to the namespace. The second refers to the service account name.
99 | # For instance, `name: system:serviceaccount:myns:default` would refer to the default service account in namespace `myns`
100 | - kind: User
101 | name: system:serviceaccount:poc:default
102 | roleRef:
103 | kind: Role
104 | name: node-reader
105 | apiGroup: rbac.authorization.k8s.io
106 |
--------------------------------------------------------------------------------
/docker-compose-dns-yugabyte.yml:
--------------------------------------------------------------------------------
1 |
2 | # Local Yugabyte database, see https://docs.yugabyte.com/latest/deploy/docker/docker-compose/
3 |
4 | volumes:
5 | yb-master-data-1:
6 | yb-tserver-data-1:
7 |
8 | services:
9 | yb-master:
10 | image: yugabytedb/yugabyte:2.12.2.0-b58
11 | container_name: yb-master-n1
12 | volumes:
13 | - yb-master-data-1:/mnt/master
14 | command: [ "/home/yugabyte/bin/yb-master",
15 | "--fs_data_dirs=/mnt/master",
16 | "--master_addresses=yb-master-n1:7100",
17 | "--rpc_bind_addresses=yb-master-n1:7100",
18 | "--replication_factor=1"]
19 | ports:
20 | - "7000:7000"
21 | environment:
22 | SERVICE_7000_NAME: yb-master
23 | networks:
24 | - statepoc
25 |
26 | yb-tserver:
27 | image: yugabytedb/yugabyte:2.12.2.0-b58
28 | container_name: yb-tserver-n1
29 | shm_size: '512mb'
30 | volumes:
31 | - yb-tserver-data-1:/mnt/tserver
32 | command: [ "/home/yugabyte/bin/yb-tserver",
33 | "--fs_data_dirs=/mnt/tserver",
34 | "--start_pgsql_proxy",
35 | "--rpc_bind_addresses=yb-tserver-n1:9100",
36 | "--tserver_master_addrs=yb-master-n1:7100",
37 | "--ysql_sequence_cache_minval=1",
38 | "--yb_num_shards_per_tserver=1"]
39 | ports:
40 | - "9042:9042"
41 | - "5433:5433"
42 | - "9000:9000"
43 | environment:
44 | SERVICE_5433_NAME: ysql
45 | SERVICE_9042_NAME: ycql
46 | SERVICE_6379_NAME: yedis
47 | SERVICE_9000_NAME: yb-tserver
48 | depends_on:
49 | - yb-master
50 | networks:
51 | - statepoc
52 |
53 | cluster:
54 | image: akka-typed-blog-distributed-state/cluster:0.1.4
55 | deploy:
56 | replicas: 3
57 | environment:
58 | JAVA_OPTS: "-Dconfig.resource=cluster-application-docker-dns.conf"
59 | CLUSTER_IP: cluster
60 | DB_HOST: yb-tserver
61 | CINNAMON_ELASTIC_HOSTS: elasticsearch:9200
62 | networks:
63 | - cinnamon-elasticsearch-docker-sandbox-2170_sandbox
64 | - statepoc
65 |
66 | # Note: if you need more than one instance of the endpoint,
67 | # you'll need to duplicate, instead of replicas, because of port conflicts
68 | endpoint:
69 | image: akka-typed-blog-distributed-state/cluster:0.1.4
70 | links:
71 | - cluster
72 | depends_on:
73 | - cluster
74 | environment:
75 | JAVA_OPTS: "-Dconfig.resource=endpoint-application-docker-dns.conf"
76 | CLUSTER_IP: endpoint
77 | CINNAMON_ELASTIC_HOSTS: elasticsearch:9200
78 | ports:
79 | - "8082:8082"
80 | - "8558:8558"
81 | networks:
82 | - cinnamon-elasticsearch-docker-sandbox-2170_sandbox
83 | - statepoc
84 |
85 |
86 | networks:
87 |
88 | #Note: this network name must match the version of the Telemetry sandbox
89 | cinnamon-elasticsearch-docker-sandbox-2170_sandbox:
90 | external: true
91 |
92 | statepoc:
93 | driver: bridge
94 | # ipam:
95 | # driver: default
96 | # config:
97 | # - subnet: 172.22.0.0/16
98 |
99 |
--------------------------------------------------------------------------------
/gatling/src/test/scala/com/lightbend/gatling/ArtifactStateScenario.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.gatling
2 |
3 | import com.typesafe.config.ConfigFactory
4 |
5 | import scala.concurrent.duration._
6 | import scala.util.Random
7 | import scala.language.postfixOps
8 | import io.gatling.core.Predef._
9 | import io.gatling.core.body.BodyWithStringExpression
10 | import io.gatling.http.Predef._
11 |
12 | class ArtifactStateScenario
13 | extends Simulation {
14 |
15 | private val config = ConfigFactory.load()
16 |
17 | val baseUrl = config.getString("loadtest.baseUrl")
18 |
19 | val namesFeeder = csv("lastnames.csv").random
20 |
21 | val artifactIds = Iterator.continually(
22 | // Random number will be accessible in session under variable "artifactId"
23 | Map("artifactId" -> Random.nextInt(500))
24 | )
25 |
26 | val httpConf = http
27 | .baseUrl(s"${baseUrl}/artifactState")
28 | .acceptHeader("application/json")
29 |
30 | val artifactAndUser: BodyWithStringExpression = StringBody("""{ "artifactId": #{artifactId}, "userId": "#{name}" }""")
31 |
32 | // a scenario that simply runs through all the various state changes
33 | val scn = scenario("ArtifactStateScenario")
34 | .feed(namesFeeder)
35 | .feed(artifactIds)
36 | .exec(
37 | http("set_artifact_read")
38 | .post("/setArtifactReadByUser")
39 | .body(artifactAndUser).asJson
40 | .check(status.is(200))
41 | )
42 |
43 | .exec(
44 | http("is_artifact_read")
45 | .post("/isArtifactReadByUser")
46 | .body(artifactAndUser).asJson
47 | .check(status.is(200))
48 | )
49 |
50 | .exec(
51 | http("set_artifact_in_feed")
52 | .post("/setArtifactAddedToUserFeed")
53 | .body(artifactAndUser).asJson
54 | .check(status.is(200))
55 | )
56 |
57 | .exec(
58 | http("is_artifact_in_user_feed")
59 | .post("/isArtifactInUserFeed")
60 | .body(artifactAndUser).asJson
61 | .check(status.is(200))
62 | )
63 |
64 | .exec(
65 | http("set_artifact_removed_from_feed")
66 | .post("/setArtifactRemovedFromUserFeed")
67 | .body(artifactAndUser).asJson
68 | .check(status.is(200))
69 | )
70 |
71 | .exec(
72 | http("get_all_states")
73 | .post("/getAllStates")
74 | .body(artifactAndUser).asJson
75 | .check(status.is(200))
76 | )
77 |
78 | setUp(
79 | // scn.inject(atOnceUsers(1))
80 | // scn.inject(rampUsers(100) during (3 minutes))
81 | scn.inject(rampUsers(1000) during (5 minutes))
82 | // simulation set-up: https://docs.gatling.io/reference/script/core/injection/#open-model
83 | /*
84 |
85 | scn.inject(
86 | nothingFor(4 seconds), // 1
87 | atOnceUsers(10), // 2
88 | rampUsers(10) during (5 seconds), // 3
89 | constantUsersPerSec(20) during (15 seconds), // 4
90 | constantUsersPerSec(20) during (15 seconds) randomized, // 5
91 | rampUsersPerSec(100) to 20 during (10 minutes), // 6
92 | rampUsersPerSec(100) to 20 during (10 minutes) randomized, // 7
93 | stressPeakUsers(1000).during(20 seconds) // 8
94 | )
95 | */
96 | .protocols(httpConf)
97 | )
98 | }
99 |
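100 | // Typically run via the Gatling sbt plugin from the gatling subproject,
101 | // e.g. `sbt Gatling/test` (the task name may vary by plugin version).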
--------------------------------------------------------------------------------
/OpenShift-3.x/endpoint-deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: endpoint
6 | namespace: poc
7 | spec:
8 | replicas: 1
9 | strategy: {}
10 | template:
11 | metadata:
12 | creationTimestamp: null
13 | labels:
14 | app: ArtifactStateCluster
15 | tag: endpoint
16 | annotations:
17 | prometheus.io/scrape: 'true'
18 | spec:
19 | containers:
20 | - env:
21 | - name: CASSANDRA_CONTACT_POINT1
22 | value: cassandra-peers
23 | - name: CLUSTER_IP
24 | value: endpoint
25 | - name: CLUSTER_PORT
26 | value: "1601"
27 | - name: SEED_PORT_1600_TCP_ADDR
28 | value: seed
29 | - name: JAVA_OPTS
30 | value: "-Dconfig.resource=endpoint-application-k8s.conf"
31 | image: akka-typed-blog-distributed-state/cluster:0.1.2
32 | imagePullPolicy: IfNotPresent
33 | #health
34 | livenessProbe:
35 | httpGet:
36 | path: /alive
37 | port: akka-mgmt-http
38 | readinessProbe:
39 | httpGet:
40 | path: /ready
41 | port: akka-mgmt-http
42 | #health
43 | name: endpoint
44 | ports:
45 | - containerPort: 8082
46 | # akka remoting
47 | - name: remoting
48 | containerPort: 2552
49 | protocol: TCP
50 | # external http
51 | - name: akka-mgmt-http
52 | containerPort: 8558
53 | protocol: TCP
54 | - name: ep-metrics
55 | containerPort: 9001
56 |
57 | resources: {}
58 | restartPolicy: Always
59 | status: {}
60 |
61 | ---
62 | apiVersion: v1
63 | kind: Service
64 | metadata:
65 | name: endpoint
66 | namespace: poc
67 | spec:
68 | type: NodePort
69 | ports:
70 | - name: "8082"
71 | protocol: TCP
72 | port: 8082
73 | targetPort: 8082
74 | nodePort: 30082
75 | selector:
76 | tag: endpoint
77 |
78 | ---
79 | #
80 | # Create a role, `endpoint-reader`, that can list pods and
81 | # bind the default service account in the `default` namespace
82 | # to that role.
83 | #
84 |
85 | kind: Role
86 | apiVersion: rbac.authorization.k8s.io/v1
87 | metadata:
88 | name: endpoint-reader
89 | namespace: poc
90 | rules:
91 | - apiGroups: [""] # "" indicates the core API group
92 | resources: ["pods"]
93 | verbs: ["get", "watch", "list"]
94 | ---
95 | kind: RoleBinding
96 | apiVersion: rbac.authorization.k8s.io/v1
97 | metadata:
98 | name: read-endpoints
99 | namespace: poc
100 | subjects:
101 | # Note the `name` line below. The first default refers to the namespace. The second refers to the service account name.
102 | # For instance, `name: system:serviceaccount:myns:default` would refer to the default service account in namespace `myns`
103 | - kind: User
104 | name: system:serviceaccount:poc:default
105 | roleRef:
106 | kind: Role
107 | name: endpoint-reader
108 | apiGroup: rbac.authorization.k8s.io
109 |
110 | ---
111 | apiVersion: route.openshift.io/v1
112 | kind: Route
113 | metadata:
114 | name: endpoint-route
115 | namespace: poc
116 | spec:
117 | port:
118 | targetPort: '8082'
119 | to:
120 | kind: Service
121 | name: endpoint
122 | weight: 100
123 | wildcardPolicy: None
124 |
--------------------------------------------------------------------------------
/OpenShift-4.1/endpoint-deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: endpoint
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | tag: endpoint
11 | strategy: {}
12 | template:
13 | metadata:
14 | creationTimestamp: null
15 | labels:
16 | app: ArtifactStateCluster
17 | tag: endpoint
18 | annotations:
19 | prometheus.io/scrape: 'true'
20 | spec:
21 | containers:
22 | - env:
23 | - name: CASSANDRA_CONTACT_POINT1
24 | value: cassandra-peers
25 | - name: CLUSTER_IP
26 | value: endpoint
27 | - name: CLUSTER_PORT
28 | value: "1601"
29 | - name: SEED_PORT_1600_TCP_ADDR
30 | value: seed
31 | - name: JAVA_OPTS
32 | value: "-Dconfig.resource=endpoint-application-k8s.conf"
33 | image: image-registry.openshift-image-registry.svc:5000/poc/cluster:latest
34 | imagePullPolicy: IfNotPresent
35 | #health
36 | livenessProbe:
37 | httpGet:
38 | path: /alive
39 | port: akka-mgmt-http
40 | readinessProbe:
41 | httpGet:
42 | path: /ready
43 | port: akka-mgmt-http
44 | #health
45 | name: endpoint
46 | ports:
47 | - containerPort: 8082
48 | # akka remoting
49 | - name: remoting
50 | containerPort: 2552
51 | protocol: TCP
52 | # external http
53 | - name: akka-mgmt-http
54 | containerPort: 8558
55 | protocol: TCP
56 | - name: ep-metrics
57 | containerPort: 9001
58 |
59 | resources: {}
60 | restartPolicy: Always
61 | status: {}
62 |
63 | ---
64 | apiVersion: v1
65 | kind: Service
66 | metadata:
67 | name: endpoint
68 | namespace: poc
69 | spec:
70 | type: NodePort
71 | ports:
72 | - name: "8082"
73 | protocol: TCP
74 | port: 8082
75 | targetPort: 8082
76 | nodePort: 30082
77 | selector:
78 | tag: endpoint
79 |
80 | ---
81 | #
82 | # Create a role, `endpoint-reader`, that can list pods and
83 | # bind the default service account in the `default` namespace
84 | # to that role.
85 | #
86 |
87 | kind: Role
88 | apiVersion: rbac.authorization.k8s.io/v1
89 | metadata:
90 | name: endpoint-reader
91 | namespace: poc
92 | rules:
93 | - apiGroups: [""] # "" indicates the core API group
94 | resources: ["pods"]
95 | verbs: ["get", "watch", "list"]
96 | ---
97 | kind: RoleBinding
98 | apiVersion: rbac.authorization.k8s.io/v1
99 | metadata:
100 | name: read-endpoints
101 | namespace: poc
102 | subjects:
103 | # Note the `name` line below. The first default refers to the namespace. The second refers to the service account name.
104 | # For instance, `name: system:serviceaccount:myns:default` would refer to the default service account in namespace `myns`
105 | - kind: User
106 | name: system:serviceaccount:poc:default
107 | roleRef:
108 | kind: Role
109 | name: endpoint-reader
110 | apiGroup: rbac.authorization.k8s.io
111 |
112 | ---
113 | apiVersion: route.openshift.io/v1
114 | kind: Route
115 | metadata:
116 | name: endpoint-route
117 | namespace: poc
118 | spec:
119 | port:
120 | targetPort: '8082'
121 | to:
122 | kind: Service
123 | name: endpoint
124 | weight: 100
125 | wildcardPolicy: None
126 |
--------------------------------------------------------------------------------
/client/src/main/scala/client/artifactstate/ArtifactStateForEach.scala:
--------------------------------------------------------------------------------
1 | package client.artifactstate
2 |
3 | import akka.actor.typed.ActorSystem
4 | import akka.actor.typed.scaladsl.Behaviors
5 | import akka.grpc.GrpcClientSettings
6 | import com.lightbend.artifactstate.endpoint.{ArtifactAndUser, ArtifactStateServiceClient, CommandResponse}
7 |
8 | import scala.collection._
9 | import scala.collection.mutable.ListBuffer
10 | import scala.concurrent.duration._
11 | import scala.concurrent.{Await, ExecutionContext, Future}
12 | import scala.util.{Failure, Random, Success}
13 |
14 | // tag::clientForEach[]
15 | object ArtifactStateForEach extends App {
16 |
17 | implicit val sys: ActorSystem[_] = ActorSystem(Behaviors.empty, "ArtifactStateClient")
18 | implicit val ec: ExecutionContext = sys.executionContext
19 |
20 | val client = ArtifactStateServiceClient(GrpcClientSettings.fromConfig("client.ArtifactStateService"))
21 |
22 | var lastnames = ListBuffer.empty[String]
23 | for (line <- scala.io.Source.fromFile("./test-data/lastnames.csv").getLines()) {
24 | lastnames += line.replaceAll("[\t\n]", "")
25 | }
26 |
27 | val commands = List("SetArtifactReadByUser", "SetArtifactAddedToUserFeed", "SetArtifactRemovedFromUserFeed")
28 |
29 | var cnt: Int = 0
30 | val r = new Random()
31 |
32 | val replies = mutable.ListBuffer.empty[Future[CommandResponse]]
33 | for (cnt <- 1 to 1000) {
34 | val commandSelect = r.between(0, 3)
35 | val command = commands(commandSelect)
36 | val artifactId = r.between(0, 101)
37 | val userId = lastnames(r.between(1, 1001))
38 | replies += singleRequestReply(ArtifactAndUser(artifactId, userId), command, cnt)
39 | }
40 | println(s"requests sent ${replies.size}")
41 | Await.result(Future.sequence(replies), Duration(5, MINUTES))
42 | println("Done.")
43 | System.exit(0)
44 |
45 | def singleRequestReply(artifactAndUser: ArtifactAndUser, command: String, cnt: Int): Future[CommandResponse] = {
46 | println(s"transmitting data ($cnt) for artifactId ${artifactAndUser.artifactId} userId ${artifactAndUser.userId} command: ${command}")
47 | val reply = command match {
48 | case "SetArtifactReadByUser" =>
49 | client.setArtifactReadByUser(artifactAndUser)
50 | case "SetArtifactAddedToUserFeed" =>
51 | client.setArtifactAddedToUserFeed(artifactAndUser)
52 | case "SetArtifactRemovedFromUserFeed" =>
53 | client.setArtifactRemovedFromUserFeed(artifactAndUser)
54 | case _ =>
55 | Future.failed(new Throwable("Unsupported command encountered."))
56 | }
57 | reply.onComplete {
58 | case Success(commandResponse: CommandResponse) =>
59 | commandResponse match {
60 | case commandResponse if commandResponse.success =>
61 | println(s"reply received ($cnt): successful command ${command} on ${artifactAndUser}")
62 | case commandResponse if !commandResponse.success =>
63 | println(s"reply received ($cnt): failed command ${command} on ${artifactAndUser}")
64 | case _ =>
65 | println("unrecognized response")
66 | }
67 |
68 | case Failure(e) =>
69 | println(s"reply received ($cnt) a failure : ${e.getMessage} on ${command}")
70 | }
71 | reply
72 | }
73 |
74 | }
75 | // end::clientForEach[]
--------------------------------------------------------------------------------
/telemetry/install-akka-grafana-dashboards.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | kubectl -n monitoring create cm akka-actors --from-file=./akka-dashboards/akka-actors.json
3 | kubectl -n monitoring label cm akka-actors grafana_dashboard="1"
4 | kubectl -n monitoring create cm akka-ask-pattern --from-file=./akka-dashboards/akka-ask-pattern.json
5 | kubectl -n monitoring label cm akka-ask-pattern grafana_dashboard="1"
6 | kubectl -n monitoring create cm akka-circuit-breakers --from-file=./akka-dashboards/akka-circuit-breakers.json
7 | kubectl -n monitoring label cm akka-circuit-breakers grafana_dashboard="1"
8 | kubectl -n monitoring create cm akka-cluster --from-file=./akka-dashboards/akka-cluster.json
9 | kubectl -n monitoring label cm akka-cluster grafana_dashboard="1"
10 | kubectl -n monitoring create cm akka-cluster-sharding --from-file=./akka-dashboards/akka-cluster-sharding.json
11 | kubectl -n monitoring label cm akka-cluster-sharding grafana_dashboard="1"
12 | kubectl -n monitoring create cm akka-dispatchers --from-file=./akka-dashboards/akka-dispatchers.json
13 | kubectl -n monitoring label cm akka-dispatchers grafana_dashboard="1"
14 | kubectl -n monitoring create cm akka-events --from-file=./akka-dashboards/akka-events.json
15 | kubectl -n monitoring label cm akka-events grafana_dashboard="1"
16 | kubectl -n monitoring create cm akka-http-clients --from-file=./akka-dashboards/akka-http-clients.json
17 | kubectl -n monitoring label cm akka-http-clients grafana_dashboard="1"
18 | kubectl -n monitoring create cm akka-http-endpoints --from-file=./akka-dashboards/akka-http-endpoints.json
19 | kubectl -n monitoring label cm akka-http-endpoints grafana_dashboard="1"
20 | kubectl -n monitoring create cm akka-http-servers --from-file=./akka-dashboards/akka-http-servers.json
21 | kubectl -n monitoring label cm akka-http-servers grafana_dashboard="1"
22 | kubectl -n monitoring create cm akka-persistence --from-file=./akka-dashboards/akka-persistence.json
23 | kubectl -n monitoring label cm akka-persistence grafana_dashboard="1"
24 | kubectl -n monitoring create cm akka-projections --from-file=./akka-dashboards/akka-projections.json
25 | kubectl -n monitoring label cm akka-projections grafana_dashboard="1"
26 | kubectl -n monitoring create cm akka-remote-actors --from-file=./akka-dashboards/akka-remote-actors.json
27 | kubectl -n monitoring label cm akka-remote-actors grafana_dashboard="1"
28 | kubectl -n monitoring create cm akka-remote-nodes --from-file=./akka-dashboards/akka-remote-nodes.json
29 | kubectl -n monitoring label cm akka-remote-nodes grafana_dashboard="1"
30 | kubectl -n monitoring create cm akka-routers --from-file=./akka-dashboards/akka-routers.json
31 | kubectl -n monitoring label cm akka-routers grafana_dashboard="1"
32 | kubectl -n monitoring create cm akka-stopwatches --from-file=./akka-dashboards/akka-stopwatches.json
33 | kubectl -n monitoring label cm akka-stopwatches grafana_dashboard="1"
34 | kubectl -n monitoring create cm akka-streams-extended --from-file=./akka-dashboards/akka-streams-extended.json
35 | kubectl -n monitoring label cm akka-streams-extended grafana_dashboard="1"
36 | kubectl -n monitoring create cm akka-streams --from-file=./akka-dashboards/akka-streams.json
37 | kubectl -n monitoring label cm akka-streams grafana_dashboard="1"
38 | kubectl -n monitoring create cm java-futures --from-file=./akka-dashboards/java-futures.json
39 | kubectl -n monitoring label cm java-futures grafana_dashboard="1"
40 | kubectl -n monitoring create cm jvm-metrics --from-file=./akka-dashboards/jvm-metrics.json
41 | kubectl -n monitoring label cm jvm-metrics grafana_dashboard="1"
42 | kubectl -n monitoring create cm scala-futures --from-file=./akka-dashboards/scala-futures.json
43 | kubectl -n monitoring label cm scala-futures grafana_dashboard="1"
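  |
  | # Equivalent loop form (a sketch, assuming every dashboard lives in ./akka-dashboards
  | # and each ConfigMap is named after its file):
  | # for f in ./akka-dashboards/*.json; do
  | #   name=$(basename "$f" .json)
  | #   kubectl -n monitoring create cm "$name" --from-file="$f"
  | #   kubectl -n monitoring label cm "$name" grafana_dashboard="1"
  | # done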
--------------------------------------------------------------------------------
/microk8s/README.md:
--------------------------------------------------------------------------------
1 | # Running the PoC on Microk8s
2 |
3 | Microk8s has become my favorite way to run a local copy of K8s, replacing Minikube.
4 |
5 | [MicroK8s](https://microk8s.io/) is a K8s distribution from Canonical, who are also known for their popular Linux distribution, Ubuntu. Microk8s is a small, fast, and fully-conformant K8s that makes clustering trivial. It’s a perfect environment for offline development, prototyping, and testing, and we’ll be using it for the remainder of this guide.
6 |
7 | Once you’ve completed the Microk8s installation, we highly recommend issuing the inspect command to make sure everything is okay. For example,
8 |
9 | ```
10 | microk8s inspect
11 | ```
12 |
13 | ## Enable add-ons
14 | To run Akka Data Pipelines and the example on Microk8s, install the following add-ons:
15 |
16 | * CoreDNS
17 | * helm3 - Kubernetes package manager
18 | * hostpath-storage - allocates storage from a host directory
19 | * traefik - Ingress controller for external access
20 | * registry - a private image registry, exposed on localhost:32000
21 |
22 | Before installing add-ons, to avoid potential permission problems, be sure that the current user is part of the `microk8s` group.
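  | For example, on a typical Linux setup (a sketch; adjust the user as needed):
  |
  | ```
  | sudo usermod -a -G microk8s $USER   # add the current user to the microk8s group
  | newgrp microk8s                     # pick up the new group without logging out
  | ```
  |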
23 | Enable the Microk8s add-ons with the following commands in your terminal window:
24 |
25 | ```
26 | microk8s enable dns
27 | microk8s enable helm3
28 | microk8s enable hostpath-storage
29 | microk8s enable traefik
30 | microk8s enable registry
31 | ```
32 | ## Helpful Command Aliases
33 | The kubectl and helm CLIs are provided by Microk8s. To quickly adapt to using traditional commands within Microk8s, we recommend creating a `.bash_aliases` file in your home directory that contains the following:
34 |
35 | ```
36 | alias kubectl='microk8s kubectl'
37 | alias k='microk8s kubectl'
38 | alias helm='microk8s helm3'
39 | alias cf='microk8s kubectl cloudflow'
40 | ```
41 |
42 | ## Build the docker image locally with sbt
43 |
44 | sbt docker:publishLocal
45 |
46 | ## Copy the image to your Microk8s registry
47 | For more specifics, please see the docs [here](https://microk8s.io/docs/registry-built-in); steps 3 and 4 of the Deployment section below show the tag-and-push commands.
48 |
49 | ## Deploy to Microk8s
50 | In this example, we're using Cassandra as our Akka Persistence database.
51 |
52 | k apply -f K8s/cassandra/
53 | k apply -f microk8s/nodes
54 | k apply -f microk8s/endpoints/
55 |
56 | ## Traefik Ingress
57 | Traefik provides a great HTTP ingress, which also happens to support gRPC, so we're taking advantage of it here. Given its flexibility, I decided to do away with NodePort services, converted them to ClusterIP, and then provided the proper ingress YAMLs for each of the `node` and `endpoint` services.
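  |
  | The actual manifests are in `microk8s/nodes/node-traefik-ingress.yaml` and `microk8s/endpoints/endpoint-traefik-ingress.yaml`; the following is only an illustrative sketch, with hypothetical resource names and port:
  |
  | ```
  | apiVersion: networking.k8s.io/v1
  | kind: Ingress
  | metadata:
  |   name: endpoint-ingress              # hypothetical name
  |   annotations:
  |     kubernetes.io/ingress.class: traefik
  | spec:
  |   rules:
  |     - http:
  |         paths:
  |           - path: /artifactState       # REST (and gRPC) traffic for the endpoint service
  |             pathType: Prefix
  |             backend:
  |               service:
  |                 name: endpoint-service # hypothetical ClusterIP service name
  |                 port:
  |                   number: 8080
  | ```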
58 |
59 | ## Akka Management - Cluster HTTP Management
60 |
61 | An ingress has been provided to the Cluster HTTP Management module for the Akka Cluster. For example, to see the status of the cluster you can use the following:
62 |
63 | ```
64 | curl localhost:8080/cluster/members | python -m json.tool
65 | ```
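  | The response lists every member with its status and roles; on a healthy cluster it looks roughly like this (abridged, with illustrative addresses and roles):
  |
  | ```
  | {
  |   "selfNode": "akka://ArtifactStateCluster@10.1.44.12:2552",
  |   "leader": "akka://ArtifactStateCluster@10.1.44.12:2552",
  |   "oldest": "akka://ArtifactStateCluster@10.1.44.12:2552",
  |   "unreachable": [],
  |   "members": [
  |     { "node": "akka://ArtifactStateCluster@10.1.44.12:2552", "status": "Up", "roles": ["sharded", "k8s"] }
  |   ]
  | }
  | ```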
66 | For more information on other options, [please see the API Definition](https://doc.akka.io/docs/akka-management/current/cluster-http-management.html#api-definition).
67 |
68 | ## Deployment
69 | 1. deploy Cassandra using the YAMLs in ../K8s/cassandra
70 | ```
71 | k apply -f ../K8s/cassandra
72 | ```
73 | 2. build to local docker:
74 | ```
75 | sbt docker:publishLocal
76 | ```
77 | 3. tag the image for the local registry (`docker tag` takes a source and a target; adjust the source to match what `docker images` shows). For example,
78 | ```
79 | docker tag akka-typed-blog-distributed-state/cluster:0.1.3 localhost:32000/akka-typed-blog-distributed-state/cluster:0.1.3
80 | ```
81 | 4. then push to the microk8s registry
82 | ```
83 | docker push localhost:32000/akka-typed-blog-distributed-state/cluster:0.1.3
84 | ```
85 | 5. then deploy `nodes`
86 | ```
87 | k apply -f nodes
88 | ```
89 | 6. then deploy `endpoints`
90 | ```
91 | k apply -f endpoints
92 | ```
93 | 7. before putting any load on the system, issue the following commands to make sure the Cassandra tables have been created.
94 | ```
95 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST localhost:8080/artifactState/setArtifactReadByUser
96 |
97 | curl 'localhost:8080/artifactState/getAllStates?artifactId=1&userId=Michael'
98 | ```
99 | You can also test the gRPC endpoints:
100 | ```
101 | grpcurl -plaintext localhost:8080 list
102 |
103 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8080 ArtifactStateService/GetAllStates
104 | ```
105 | 8. You can also follow the logs for all the nodes with one command:
106 | ```
107 | k logs -f -l app=ArtifactStateCluster
108 | ```
109 |
110 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | # How To Distribute Application State with Akka Cluster
3 |
4 | Building, testing, containerizing, deploying, and monitoring distributed microservices is difficult, but using Lightbend technologies can make the journey a lot faster and easier.
5 | In this four-part blog series, we walk you through a working Proof of Concept (PoC) built using Lightbend’s open source distributed toolkit, Akka. This PoC delivers a resilient, highly performant, elastic, distributed, and consistent state solution that provides an automatic in-memory cache with a persistent backing. Here is the breakdown:
6 | - [Part 1](https://www.lightbend.com/blog/how-to-distribute-application-state-with-akka-cluster-part-1-getting-started) - Getting Started: we walk through building, testing, and running the PoC locally, with instrumentation and monitoring wired in from the very beginning using Lightbend Telemetry.
7 | - [Part 2](https://www.lightbend.com/blog/how-to-distribute-application-state-with-akka-cluster-part-2-docker-and-local-deploy) - Docker and Local Deploy: here we cover containerizing our PoC, and then deploying locally in Docker. Then, we’ll load test and monitor our PoC as we did in the first installment.
8 | - [Part 3](https://www.lightbend.com/blog/how-to-distribute-application-state-with-akka-cluster-part-3-kubernetes-monitoring) - Kubernetes and Monitoring: in this part, we introduce Lightbend Console for Kubernetes (K8s), and then deploy our PoC in Minikube (the desktop version of K8s) using YAML files provided in this repository. Again, we’ll load test with Gatling, but this time we’ll monitor our PoC in Lightbend Console.
9 | - [Part 4](https://www.lightbend.com/blog/how-to-distribute-application-state-with-akka-cluster-part-4-the-source-code) - Source Code: In our final installment, we do a deep dive into our Scala source code.
10 | > Note: This repository is written for **Scala** developers; a **Java** version can be found [here](https://github.com/michael-read/akka-typed-distributed-state-blog-java).
11 | ----------------
12 | ## Update(s) September, 2024
13 | - Move to Java 21
14 | - upgrade all Akka Dependencies to release v24.05.3
15 | - upgrade SBT to v1.10.0
16 | - upgrade Gatling to v3.12.0
17 | - fixed bug in Gatling scenario
18 | - migrate docker-compose & nonsup-docker-compose to use Akka Cluster Bootstrap w/ DNS instead of using Seed nodes for Akka Cluster formation.
19 |
20 | ## Update September 20, 2022
21 | - updated the branch `mread-multi-dc-common-journal`, which supports Akka's Replicated Event Sourcing "Common Journal" running on Multi-Data Center / Microk8s and Yugabyte. Updated README.md can be found [here](https://github.com/michael-read/akka-typed-distributed-state-blog/tree/mread-multi-dc-common-journal/microk8s-multi-dc).
22 |
23 | ## Update September 15, 2022
24 | - update Akka related dependencies to latest
25 | - Moved to Java 17 base image
26 | - Set up for Akka DNS cluster formation on Docker w/ Yugabyte. Doc [here](DOCKER_DNS_YUGABYTE.md).
27 |
28 | ## Update: April 18, 2022
29 | - Updated Akka, Akka Http, and Akka gRPC dependencies with the latest versions
30 | - recreated the missing gRPC Client (ArtifactStateForEach) example for request / response
31 |
32 | ## Update: February 22, 2022
33 | - Merged PR to include gRPC support even though blog hasn't been updated yet. No ETA on blog update.
34 | - A version of this repo written in Java can be found [here](https://github.com/michael-read/akka-typed-distributed-state-blog-java).
35 |
36 | ## Update: August 31, 2021
37 | - On Branch: mread-add-grpc-endpoint: updated the instructions for deploying locally on Microk8s.
38 |
39 | ## Update: August 11, 2021
40 | - On Branch: mread-add-grpc-endpoint, which will be merged to master, once we update the blog post
41 | - fix Cassandra v3.11.11 docker-compose files
42 |
43 | ## Update: August 6, 2021
44 | - fixed deprecation warnings
45 |
46 | ## Update: August 5, 2021
47 | - Upgraded all versions of the Lightbend stack's dependencies to the currently released versions.
48 | * The biggest change revolves around the upgrade of `akka-persistence-cassandra` (v0.102 -> v1.0.5), which requires a migration of tables and configuration.
49 | * Scala: v2.13.1 -> v2.13.6
50 | * Sbt v1.3.9 -> v1.5.5
51 | * Akka: v2.6.6 -> v2.6.15
52 | * Akka Http: v10.1.12 -> v10.2.5
53 | * Akka Management v1.0.8 -> v1.1.1
54 | * Lightbend Telemetry: v2.14.0 -> v2.16.1
55 | * Java: v8 -> v11
56 |
57 | - Microk8s - tested locally on Microk8s, please see [microk8s/README.md](microk8s/README.md) for more information.
58 | - locked the Cassandra image to v3.11.11
59 |
60 | ## Update: June 10, 2020
61 |
62 | In honor of Lightbend’s release of Akka’s Split Brain Resolver as OSS, I’ve updated this repository to take advantage of Akka 2.6.6.
63 |
64 | For more information on Akka Split Brain Resolver please see the announcement here → https://akka.io/blog/news/2020/06/08/akka-2.6.6-released-split-brain-resolver
65 |
66 | Also, Patrik Nordwall’s video on Split Brain Resolver is really great in helping you understand why you should care → https://akka.io/blog/news/2020/06/08/akka-split-brain-resolver-video
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/app/StartNode.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.app
2 |
3 | import akka.actor.typed.scaladsl.Behaviors
4 | import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
5 | import akka.actor.typed.{ActorRef, ActorSystem, Behavior}
6 | import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity, EntityTypeKey}
7 | import akka.cluster.sharding.typed.{ClusterShardingSettings, ShardingEnvelope}
8 | import akka.cluster.typed.Cluster
9 | import akka.http.scaladsl.Http
10 | import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
11 | import akka.http.scaladsl.server.Directives.{concat, handle}
12 | import akka.http.scaladsl.server.Route
13 | import akka.management.cluster.bootstrap.ClusterBootstrap
14 | import akka.management.scaladsl.AkkaManagement
15 | import akka.{Done, NotUsed, actor}
16 | import com.lightbend.artifactstate.actors.ArtifactStateEntityActor.{ArtifactCommand, ArtifactStatesShardName}
17 | import com.lightbend.artifactstate.actors.{ArtifactStateEntityActor, ClusterListenerActor}
18 | import com.lightbend.artifactstate.endpoint.{ArtifactStateRoutes, ArtifactStateServiceHandler, GrpcArtifactStateServiceImpl}
19 | import com.typesafe.config.ConfigFactory
20 |
21 | import scala.concurrent.{ExecutionContextExecutor, Future}
22 |
23 | object StartNode {
24 | private val appConfig = ConfigFactory.load()
25 |
26 | def main(args: Array[String]): Unit = {
27 | val clusterName = appConfig.getString("clustering.cluster.name")
28 | val clusterPort = appConfig.getInt("clustering.port")
29 | val defaultPort = appConfig.getInt("clustering.defaultPort")
30 | if (appConfig.hasPath("clustering.ports")) {
31 | val clusterPorts = appConfig.getIntList("clustering.ports")
32 | clusterPorts.forEach { port =>
33 | startNode(RootBehavior(port, defaultPort), clusterName)
34 | }
35 | }
36 | else {
37 | startNode(RootBehavior(clusterPort, defaultPort), clusterName)
38 | }
39 | }
40 |
41 | private object RootBehavior {
42 | def apply(port: Int, defaultPort: Int): Behavior[NotUsed] =
43 | Behaviors.setup { context =>
44 | implicit val classicSystem: actor.ActorSystem = TypedActorSystemOps(context.system).toClassic
45 |
46 | val TypeKey = EntityTypeKey[ArtifactCommand](ArtifactStatesShardName)
47 |
48 | val cluster = Cluster(context.system)
49 |
50 | context.log.info("starting node with roles:")
51 | cluster.selfMember.roles.foreach { role =>
52 | context.log.info(s"role : $role")
53 | }
54 |
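  | // With the "k8s" or "dns" role, start Akka Management's HTTP endpoint and form the cluster via Cluster Bootstrap discovery.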
55 | if (cluster.selfMember.hasRole("k8s") || cluster.selfMember.hasRole("dns")) {
56 | AkkaManagement(classicSystem).start()
57 | ClusterBootstrap(classicSystem).start()
58 | }
59 |
60 | if (cluster.selfMember.hasRole("sharded")) {
61 | ClusterSharding(context.system).init(Entity(TypeKey)
62 | (createBehavior = ctx => ArtifactStateEntityActor(ctx.entityId))
63 | .withSettings(ClusterShardingSettings(context.system).withRole("sharded")))
64 | }
65 | else {
66 | if (cluster.selfMember.hasRole("endpoint")) {
67 | implicit val ec: ExecutionContextExecutor = context.system.executionContext
68 | val psCommandActor: ActorRef[ShardingEnvelope[ArtifactCommand]] =
69 | ClusterSharding(context.system).init(Entity(TypeKey)
70 | (createBehavior = ctx => ArtifactStateEntityActor(ctx.entityId)))
71 |
72 | lazy val routes: Route = new ArtifactStateRoutes(context.system, psCommandActor).psRoutes
73 | val httpPort = context.system.settings.config.getString("akka.http.server.default-http-port")
74 | val interface = if (cluster.selfMember.hasRole("docker")
75 | || cluster.selfMember.hasRole("k8s")
76 | || cluster.selfMember.hasRole("dns")) {
77 | "0.0.0.0"
78 | }
79 | else {
80 | "localhost"
81 | }
82 |
83 | // Create gRPC service handler
84 | val grpcService: HttpRequest => Future[HttpResponse] =
85 | ArtifactStateServiceHandler.withServerReflection(new GrpcArtifactStateServiceImpl(context.system, psCommandActor))
86 |
87 | // As a Route
88 | val grpcHandlerRoute: Route = handle(grpcService)
89 |
90 | val route = concat(routes, grpcHandlerRoute)
91 |
92 | // Both HTTP and gRPC Binding
93 | val binding = Http().newServerAt(interface, httpPort.toInt).bind(route)
94 |
95 | binding.foreach { binding => context.system.log.info(s"HTTP / gRPC Server online at ip ${binding.localAddress} port $httpPort") }
96 | }
97 | }
98 |
99 | if (port == defaultPort) {
100 | context.spawn(ClusterListenerActor(), "clusterListenerActor")
101 | context.system.log.info("started clusterListenerActor")
102 | }
103 |
104 | Behaviors.empty
105 | }
106 | }
107 |
108 |
109 | def startNode(behavior: Behavior[NotUsed], clusterName: String): Future[Done] = {
110 | val system = ActorSystem(behavior, clusterName, appConfig)
111 | system.whenTerminated // remove compiler warnings
112 | }
113 |
114 | }
115 |
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/actors/ArtifactStateEntityActor.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.actors
2 |
3 | import akka.actor.typed.ActorRef
4 | import akka.actor.typed.Behavior
5 | import akka.persistence.typed.PersistenceId
6 | import akka.persistence.typed.scaladsl.Effect
7 | import akka.persistence.typed.scaladsl.EventSourcedBehavior
8 | import com.lightbend.artifactstate.serializer.{EventSerializeMarker, MsgSerializeMarker}
9 |
10 | object ArtifactStateEntityActor {
11 |
12 | final val ArtifactStatesShardName = "ArtifactState"
13 |
14 | sealed trait BaseId extends MsgSerializeMarker {
15 | val artifactId: Long
16 | val userId: String
17 | }
18 | sealed trait ArtifactCommand extends BaseId
19 | sealed trait ArtifactQuery extends ArtifactCommand
20 | sealed trait ArtifactResponse extends MsgSerializeMarker
21 |
22 | // queries
23 | final case class IsArtifactReadByUser(replyTo: ActorRef[ArtifactReadByUser], artifactId: Long, userId: String) extends ArtifactQuery
24 | final case class IsArtifactInUserFeed(replyTo: ActorRef[ArtifactInUserFeed], artifactId: Long, userId: String) extends ArtifactQuery
25 | final case class GetAllStates(replyTo: ActorRef[AllStates], artifactId: Long, userId: String) extends ArtifactQuery
26 |
27 | // commands
28 | final case class SetArtifactRead(replyTo: ActorRef[Okay], artifactId: Long, userId: String) extends ArtifactCommand
29 | final case class SetArtifactAddedToUserFeed(replyTo: ActorRef[Okay], artifactId: Long, userId: String) extends ArtifactCommand
30 | final case class SetArtifactRemovedFromUserFeed(replyTo: ActorRef[Okay], artifactId: Long, userId: String) extends ArtifactCommand
31 |
32 | // responses
33 | final case class Okay(okay: String = "OK") extends ArtifactResponse
34 | final case class ArtifactReadByUser(artifactRead: Boolean) extends ArtifactResponse
35 | final case class ArtifactInUserFeed(artifactInUserFeed: Boolean) extends ArtifactResponse
36 | final case class AllStates(artifactRead: Boolean, artifactInUserFeed: Boolean) extends ArtifactResponse
37 |
38 | // events
39 | sealed trait ArtifactEvent extends EventSerializeMarker
40 | final case class ArtifactRead(mark: String) extends ArtifactEvent
41 | final case class ArtifactAddedToUserFeed() extends ArtifactEvent
42 | final case class ArtifactRemovedFromUserFeed() extends ArtifactEvent
43 |
44 | final case class CurrState(artifactRead: Boolean = false, artifactInUserFeed : Boolean = false) extends MsgSerializeMarker
45 |
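  | // Each sharded entity persists its events under PersistenceId(ArtifactStatesShardName, entityId); the endpoints build entityId as "<artifactId><userId>".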
46 | def apply(entityId: String): Behavior[ArtifactCommand] =
47 | EventSourcedBehavior[ArtifactCommand, ArtifactEvent, CurrState](
48 | persistenceId = PersistenceId(ArtifactStatesShardName, entityId),
49 | emptyState = CurrState(),
50 | commandHandler,
51 | eventHandler)
52 |
53 | private val commandHandler: (CurrState, ArtifactCommand) => Effect[ArtifactEvent, CurrState] = { (state, command) =>
54 | command match {
55 | case SetArtifactRead (replyTo, _, _) => artifactRead(replyTo, state)
56 | case SetArtifactAddedToUserFeed (replyTo, _, _) => artifactAddedToUserFeed(replyTo, state)
57 | case SetArtifactRemovedFromUserFeed (replyTo, _, _) => artifactRemovedFromUserFeed(replyTo, state)
58 |
59 | case IsArtifactReadByUser (replyTo, _, _) => getArtifactRead(replyTo, state)
60 | case IsArtifactInUserFeed (replyTo, _, _) => getArtifactInFeed (replyTo, state)
61 | case GetAllStates (replyTo, _, _) => getArtifactState (replyTo, state)
62 | }
63 | }
64 |
65 | private def artifactRead(replyTo: ActorRef[Okay], currState: CurrState): Effect[ArtifactEvent, CurrState] = {
66 | Effect.persist(ArtifactRead("Mike was here")).thenRun(_ => replyTo ! Okay())
67 | }
68 |
69 | private def artifactAddedToUserFeed(replyTo: ActorRef[Okay], currState: CurrState): Effect[ArtifactEvent, CurrState] = {
70 | Effect.persist(ArtifactAddedToUserFeed()).thenRun(_ => replyTo ! Okay())
71 | }
72 |
73 | private def artifactRemovedFromUserFeed(replyTo: ActorRef[Okay], currState: CurrState): Effect[ArtifactEvent, CurrState] = {
74 | Effect.persist(ArtifactRemovedFromUserFeed()).thenRun(_ => replyTo ! Okay())
75 | }
76 |
77 | private def getArtifactRead(replyTo: ActorRef[ArtifactReadByUser], currState: CurrState): Effect[ArtifactEvent, CurrState] = {
78 | replyTo ! ArtifactReadByUser(currState.artifactRead)
79 | Effect.none
80 | }
81 |
82 | private def getArtifactInFeed(replyTo: ActorRef[ArtifactInUserFeed], currState: CurrState): Effect[ArtifactEvent, CurrState] = {
83 | replyTo ! ArtifactInUserFeed(currState.artifactInUserFeed)
84 | Effect.none
85 | }
86 |
87 | private def getArtifactState(replyTo: ActorRef[AllStates], currState: CurrState): Effect[ArtifactEvent, CurrState] = {
88 | replyTo ! AllStates(currState.artifactRead, currState.artifactInUserFeed)
89 | Effect.none
90 | }
91 |
92 | private val eventHandler: (CurrState, ArtifactEvent) => CurrState = { (state, event) =>
93 | event match {
94 | case ArtifactRead(_) =>
95 | CurrState(artifactRead = true, artifactInUserFeed = state.artifactInUserFeed)
96 |
97 | case ArtifactAddedToUserFeed() =>
98 | CurrState(state.artifactRead, artifactInUserFeed = true)
99 |
100 | case ArtifactRemovedFromUserFeed() =>
101 | CurrState(state.artifactRead, artifactInUserFeed = false)
102 |
103 | case _ => throw new IllegalStateException(s"unexpected event [$event] in state [$state]")
104 | }
105 | }
106 | }
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-remote-nodes.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Akka Remote Nodes",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Phi Accrual Value",
20 | "description": "The nodes in the cluster monitor each other by sending heartbeats to detect if a node is unreachable from the rest of the cluster. The heartbeat arrival times is interpreted by an implementation of The Phi Accrual Failure Detector.",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": null
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "short",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "remote_node_phi_accrual_value{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$SelfNodes\", remote_node=~\"$RemoteNodes\"}",
60 | "format": "time_series",
61 | "legendFormat": "{{host}} / {{self_node}} / {{remote_node}} / phi-value"
62 | },
63 | {
64 | "expr": "self_node_phi_accrual_threshold_value{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$SelfNodes\"}",
65 | "format": "time_series",
66 | "legendFormat": "{{host}} / {{self_node}} / phi-threshold-value"
67 | }
68 | ]
69 | }
70 | ],
71 | "repeat": "Applications",
72 | "title": "$Applications"
73 | }
74 | ],
75 | "time": {
76 | "from": "now-15m",
77 | "to": "now"
78 | },
79 | "timepicker": {
80 | "refresh_intervals": [
81 | "5s",
82 | "10s",
83 | "30s",
84 | "1m",
85 | "5m",
86 | "15m",
87 | "30m",
88 | "1h",
89 | "2h",
90 | "1d"
91 | ],
92 | "time_options": [
93 | "5m",
94 | "15m",
95 | "1h",
96 | "6h",
97 | "12h",
98 | "24h",
99 | "2d",
100 | "7d",
101 | "30d"
102 | ]
103 | },
104 | "templating": {
105 | "list": [
106 | {
107 | "current": {},
108 | "datasource": "Cinnamon Prometheus",
109 | "hide": 0,
110 | "includeAll": true,
111 | "allValue": ".*",
112 | "multi": true,
113 | "name": "Applications",
114 | "options": [],
115 | "query": "label_values(application)",
116 | "refresh": 2,
117 | "regex": "",
118 | "tagValuesQuery": "",
119 | "tagsQuery": "name",
120 | "type": "query",
121 | "useTags": false
122 | },
123 | {
124 | "current": {},
125 | "datasource": "Cinnamon Prometheus",
126 | "hide": 0,
127 | "includeAll": true,
128 | "allValue": ".*",
129 | "multi": true,
130 | "name": "Servers",
131 | "options": [],
132 | "query": "label_values(remote_node_cluster_domain_event{application=~\"$Applications\"}, host)",
133 | "refresh": 2,
134 | "regex": "",
135 | "tagValuesQuery": "",
136 | "tagsQuery": "name",
137 | "type": "query",
138 | "useTags": false
139 | },
140 | {
141 | "current": {},
142 | "datasource": "Cinnamon Prometheus",
143 | "hide": 0,
144 | "includeAll": true,
145 | "allValue": ".*",
146 | "multi": true,
147 | "name": "SelfNodes",
148 | "options": [],
149 | "query": "label_values(remote_node_cluster_domain_event{application=~\"$Applications\", host=~\"$Servers\"}, self_node)",
150 | "refresh": 2,
151 | "regex": "",
152 | "tagValuesQuery": "",
153 | "tagsQuery": "name",
154 | "type": "query",
155 | "useTags": false
156 | },
157 | {
158 | "current": {},
159 | "datasource": "Cinnamon Prometheus",
160 | "hide": 0,
161 | "includeAll": true,
162 | "allValue": ".*",
163 | "multi": true,
164 | "name": "RemoteNodes",
165 | "options": [],
166 | "query": "label_values(remote_node_cluster_domain_event{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$SelfNodes\"}, remote_node)",
167 | "refresh": 2,
168 | "regex": "",
169 | "tagValuesQuery": "",
170 | "tagsQuery": "name",
171 | "type": "query",
172 | "useTags": false
173 | }
174 | ]
175 | },
176 | "annotations": {
177 | "list": []
178 | },
179 | "refresh": false,
180 | "schemaVersion": 12,
181 | "version": 3,
182 | "links": [],
183 | "gnetId": null
184 | }
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/endpoint/GrpcArtifactStateServiceImpl.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.endpoint
2 |
3 | import akka.NotUsed
4 | import akka.actor.typed.scaladsl.AskPattern.Askable
5 | import akka.actor.typed.{ActorRef, ActorSystem, Scheduler}
6 | import akka.cluster.sharding.typed.ShardingEnvelope
7 | import akka.stream.scaladsl.Source
8 | import akka.util.Timeout
9 | import com.lightbend.artifactstate.actors.ArtifactStateEntityActor._
10 | import com.lightbend.artifactstate.endpoint
11 |
12 | import scala.concurrent.{ExecutionContextExecutor, Future}
13 |
14 | class GrpcArtifactStateServiceImpl(system: ActorSystem[Nothing], psCommandActor: ActorRef[ShardingEnvelope[ArtifactCommand]]) extends ArtifactStateService {
15 |
16 | private implicit val ec: ExecutionContextExecutor = system.executionContext
17 |
18 | // If the ask takes longer than this to complete, the request is failed
19 | private implicit val timeout: Timeout = Timeout.create(system.settings.config.getDuration("app.routes.ask-timeout"))
20 | implicit val scheduler: Scheduler = system.scheduler
21 |
22 | def handleResponse(req: ArtifactAndUser, f: Future[ArtifactResponse]): Future[ExtResponse] = {
23 | f.map {
24 | case ArtifactReadByUser(artifactRead) =>
25 | ExtResponse(req.artifactId, req.userId, artifactRead)
26 | case ArtifactInUserFeed(artifactInUserFeed) =>
27 | ExtResponse(req.artifactId, req.userId, artifactInUserFeed)
28 | case _ =>
29 | ExtResponse(req.artifactId, req.userId, failureMsg = "Internal Query Error: this shouldn't happen.")
30 | }
31 | }
32 |
33 | def handleCmdResponse(req: ArtifactAndUser, f: Future[ArtifactResponse]): Future[CommandResponse] = f.map {
34 | case Okay(_) => CommandResponse(success = true)
35 | case _ =>
36 | system.log.error("Internal Command Error: this shouldn't happen.")
37 | CommandResponse()
38 | }.recover {
39 | case ex: Exception =>
40 | system.log.error(s"failure on request user: ${req.userId} artifact id: ${req.artifactId} ${ex.getMessage}", ex)
41 | CommandResponse()
42 | }
43 |
44 | /**
45 | * queries
46 | */
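  | // Each ask is routed through Cluster Sharding; the entity id "<artifactId><userId>" below must match ArtifactStateEntityActor's id scheme.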
47 | override def isArtifactReadByUser(req: ArtifactAndUser): Future[ExtResponse] = {
48 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
49 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), IsArtifactReadByUser(ref, req.artifactId, req.userId))
50 | }
51 | handleResponse(req, result)
52 | }
53 |
54 | override def isArtifactInUserFeed(req: ArtifactAndUser): Future[ExtResponse] = {
55 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
56 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), IsArtifactInUserFeed(ref, req.artifactId, req.userId))
57 | }
58 | handleResponse(req, result)
59 | }
60 |
61 | override def getAllStates(req: ArtifactAndUser): Future[AllStatesResponse] = {
62 | val f = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
63 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), GetAllStates(ref, req.artifactId, req.userId))
64 | }
65 | f.map {
66 | case AllStates(artifactRead, artifactInUserFeed) =>
67 | AllStatesResponse(req.artifactId, req.userId, artifactRead, artifactInUserFeed)
68 | case _ =>
69 | AllStatesResponse(req.artifactId, req.userId, failureMsg = "Internal Error: this shouldn't happen.")
70 | }.recover {
71 | case ex: Exception =>
72 | system.log.error(ex.getMessage, ex)
73 | AllStatesResponse(req.artifactId, req.userId, failureMsg = ex.getMessage)
74 | }
75 | }
76 |
77 | /**
78 | * commands
79 | */
80 | override def setArtifactReadByUser(req: ArtifactAndUser): Future[CommandResponse] = {
81 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
82 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), SetArtifactRead(ref, req.artifactId, req.userId))
83 | }
84 | handleCmdResponse(req, result)
85 | }
86 |
87 | override def setArtifactAddedToUserFeed(req: ArtifactAndUser): Future[CommandResponse] = {
88 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
89 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), SetArtifactAddedToUserFeed(ref, req.artifactId, req.userId))
90 | }
91 | handleCmdResponse(req, result)
92 | }
93 |
94 | override def setArtifactRemovedFromUserFeed(req: ArtifactAndUser): Future[CommandResponse] = {
95 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
96 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), SetArtifactRemovedFromUserFeed(ref, req.artifactId, req.userId))
97 | }
98 | handleCmdResponse(req, result)
99 | }
100 |
101 | override def commandsStreamed(in: Source[endpoint.ArtifactCommand, NotUsed]): Source[StreamedResponse, NotUsed] = {
102 | val validCommands = Set("SetArtifactReadByUser", "SetArtifactAddedToUserFeed", "SetArtifactRemovedFromUserFeed")
103 |
104 | in.mapAsync(5) { command => // parallelism should be configurable
105 | // validate the command first
106 | if (validCommands.contains(command.command)) {
107 | val result = psCommandActor.ask { ref: ActorRef[ArtifactResponse] =>
108 | command.command match {
109 | case "SetArtifactReadByUser" =>
110 | ShardingEnvelope("%d%s".format(command.artifactId, command.userId), SetArtifactRead(ref, command.artifactId, command.userId))
111 | case "SetArtifactAddedToUserFeed" =>
112 | ShardingEnvelope("%d%s".format(command.artifactId, command.userId), SetArtifactAddedToUserFeed(ref, command.artifactId, command.userId))
113 | case "SetArtifactRemovedFromUserFeed" =>
114 | ShardingEnvelope("%d%s".format(command.artifactId, command.userId), SetArtifactRemovedFromUserFeed(ref, command.artifactId, command.userId))
115 | }
116 | }
117 | handleCmdResponse(ArtifactAndUser(command.artifactId, command.userId), result).map { response =>
118 | StreamedResponse(response.success, command = Some(command))
119 | }
120 | }
121 | else {
122 | val errMsg = s"invalid command received ${command.command} for user: ${command.userId} artifact id: ${command.artifactId}"
123 | system.log.error(errMsg)
124 | Future.successful(StreamedResponse(failureMsg = errMsg, command = Some(command)))
125 | }
126 | }
127 | .named("commandsStreamedIn")
128 | }
129 | }
130 |
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-cluster.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Akka Cluster",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Reachable/unreachable nodes from $Nodes",
20 | "description": "",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": "node count"
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "short",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "self_node_reachable_nodes{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$Nodes\"}",
60 | "format": "time_series",
61 | "legendFormat": "{{host}} / {{node}} / {{reachable}}"
62 | },
63 | {
64 | "expr": "self_node_unreachable_nodes{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$Nodes\"}",
65 | "format": "time_series",
66 | "legendFormat": "{{host}} / {{node}} / {{unreachable}}"
67 | }
68 | ]
69 | },
70 | {
71 | "datasource": "Cinnamon Prometheus",
72 | "title": "Split Brain Resolver events from $Nodes",
73 | "description": "",
74 | "type": "timeseries",
75 | "id": 2,
76 | "interval": "10s",
77 | "fieldConfig": {
78 | "defaults": {
79 | "custom": {
80 | "drawStyle": "line",
81 | "lineInterpolation": "linear",
82 | "lineWidth": 2,
83 | "fillOpacity": 10,
84 | "gradientMode": "none",
85 | "showPoints": "never",
86 | "stacking": {
87 | "mode": "none"
88 | },
89 | "axisPlacement": "auto",
90 | "axisLabel": "event count"
91 | },
92 | "color": {
93 | "mode": "palette-classic"
94 | },
95 | "unit": "short",
96 | "min": 0,
97 | "max": null
98 | }
99 | },
100 | "options": {
101 | "legend": {
102 | "showLegend": false
103 | },
104 | "tooltip": {
105 | "mode": "multi",
106 | "sort": "desc"
107 | }
108 | },
109 | "span": 6,
110 | "targets": [
111 | {
112 | "alias" : "sbr-events",
113 | "expr": "self_node_split_brain_resolver_event{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$Nodes\"}",
114 | "format": "time_series",
115 | "legendFormat": "{{host}} / {{node}}"
116 | }
117 | ]
118 | }
119 | ],
120 | "repeat": "Applications",
121 | "title": "$Applications"
122 | }
123 | ],
124 | "time": {
125 | "from": "now-15m",
126 | "to": "now"
127 | },
128 | "timepicker": {
129 | "refresh_intervals": [
130 | "5s",
131 | "10s",
132 | "30s",
133 | "1m",
134 | "5m",
135 | "15m",
136 | "30m",
137 | "1h",
138 | "2h",
139 | "1d"
140 | ],
141 | "time_options": [
142 | "5m",
143 | "15m",
144 | "1h",
145 | "6h",
146 | "12h",
147 | "24h",
148 | "2d",
149 | "7d",
150 | "30d"
151 | ]
152 | },
153 | "templating": {
154 | "list": [
155 | {
156 | "current": {},
157 | "datasource": "Cinnamon Prometheus",
158 | "hide": 0,
159 | "includeAll": true,
160 | "allValue": ".*",
161 | "multi": true,
162 | "name": "Applications",
163 | "options": [],
164 | "query": "label_values(application)",
165 | "refresh": 2,
166 | "regex": "",
167 | "tagValuesQuery": "",
168 | "tagsQuery": "name",
169 | "type": "query",
170 | "useTags": false
171 | },
172 | {
173 | "current": {},
174 | "datasource": "Cinnamon Prometheus",
175 | "hide": 0,
176 | "includeAll": true,
177 | "allValue": ".*",
178 | "multi": true,
179 | "name": "Servers",
180 | "options": [],
181 | "query": "label_values(self_node_reachable_nodes{application=~\"$Applications\"}, host)",
182 | "refresh": 2,
183 | "regex": "",
184 | "tagValuesQuery": "",
185 | "tagsQuery": "name",
186 | "type": "query",
187 | "useTags": false
188 | },
189 | {
190 | "current": {},
191 | "datasource": "Cinnamon Prometheus",
192 | "hide": 0,
193 | "includeAll": true,
194 | "allValue": ".*",
195 | "multi": true,
196 | "name": "Nodes",
197 | "options": [],
198 | "query": "label_values(self_node_reachable_nodes{application=~\"$Applications\", host=~\"$Servers\"}, self_node)",
199 | "refresh": 2,
200 | "regex": "",
201 | "tagValuesQuery": "",
202 | "tagsQuery": "name",
203 | "type": "query",
204 | "useTags": false
205 | }
206 | ]
207 | },
208 | "annotations": {
209 | "list": []
210 | },
211 | "refresh": false,
212 | "schemaVersion": 12,
213 | "version": 3,
214 | "links": [],
215 | "gnetId": null
216 | }
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-cluster-sharding.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Akka Cluster Sharding",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Shard per region",
20 | "description": "",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": ""
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "short",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "shard_region_shard_count_value{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$Nodes\", shard_region=~\"$Regions\"}",
60 | "format": "time_series",
61 | "legendFormat": "{{host}} / {{self_node}} / {{shard_region}}"
62 | }
63 | ]
64 | },
65 | {
66 | "datasource": "Cinnamon Prometheus",
67 | "title": "Shard entities per shard",
68 | "description": "",
69 | "type": "timeseries",
70 | "id": 2,
71 | "interval": "10s",
72 | "fieldConfig": {
73 | "defaults": {
74 | "custom": {
75 | "drawStyle": "line",
76 | "lineInterpolation": "linear",
77 | "lineWidth": 2,
78 | "fillOpacity": 10,
79 | "gradientMode": "none",
80 | "showPoints": "never",
81 | "stacking": {
82 | "mode": "none"
83 | },
84 | "axisPlacement": "auto",
85 | "axisLabel": ""
86 | },
87 | "color": {
88 | "mode": "palette-classic"
89 | },
90 | "unit": "short",
91 | "min": 0,
92 | "max": null
93 | }
94 | },
95 | "options": {
96 | "legend": {
97 | "showLegend": false
98 | },
99 | "tooltip": {
100 | "mode": "multi",
101 | "sort": "desc"
102 | }
103 | },
104 | "span": 6,
105 | "targets": [
106 | {
107 | "expr": "shard_shard_entity_count_value{application=~\"$Applications\", host=~\"$Servers\", self_node=~\"$Nodes\", shard_region=~\"$Regions\"}",
108 | "format": "time_series",
109 | "legendFormat": "{{host}} / {{self_node}} / {{shard_region}} / {{shard}}"
110 | }
111 | ]
112 | }
113 | ],
114 | "repeat": "Applications",
115 | "title": "$Applications"
116 | }
117 | ],
118 | "time": {
119 | "from": "now-15m",
120 | "to": "now"
121 | },
122 | "timepicker": {
123 | "refresh_intervals": [
124 | "5s",
125 | "10s",
126 | "30s",
127 | "1m",
128 | "5m",
129 | "15m",
130 | "30m",
131 | "1h",
132 | "2h",
133 | "1d"
134 | ],
135 | "time_options": [
136 | "5m",
137 | "15m",
138 | "1h",
139 | "6h",
140 | "12h",
141 | "24h",
142 | "2d",
143 | "7d",
144 | "30d"
145 | ]
146 | },
147 | "templating": {
148 | "list": [
149 | {
150 | "current": {},
151 | "datasource": "Cinnamon Prometheus",
152 | "hide": 0,
153 | "includeAll": true,
154 | "allValue": ".*",
155 | "multi": true,
156 | "name": "Applications",
157 | "options": [],
158 | "query": "label_values(application)",
159 | "refresh": 2,
160 | "regex": "",
161 | "tagValuesQuery": "",
162 | "tagsQuery": "name",
163 | "type": "query",
164 | "useTags": false
165 | },
166 | {
167 | "current": {},
168 | "datasource": "Cinnamon Prometheus",
169 | "hide": 0,
170 | "includeAll": true,
171 | "allValue": ".*",
172 | "multi": true,
173 | "name": "Servers",
174 | "options": [],
175 | "query": "label_values(shard_region_shard_count_value{application=~\"$Applications\"}, host)",
176 | "refresh": 2,
177 | "regex": "",
178 | "tagValuesQuery": "",
179 | "tagsQuery": "name",
180 | "type": "query",
181 | "useTags": false
182 | },
183 | {
184 | "current": {},
185 | "datasource": "Cinnamon Prometheus",
186 | "hide": 0,
187 | "includeAll": true,
188 | "allValue": ".*",
189 | "multi": true,
190 | "name": "Nodes",
191 | "options": [],
192 | "query": "label_values(shard_region_shard_count_value{host=~\"$Servers\", application=~\"$Applications\"}, self_node)",
193 | "refresh": 2,
194 | "regex": "",
195 | "tagValuesQuery": "",
196 | "tagsQuery": "name",
197 | "type": "query",
198 | "useTags": false
199 | },
200 | {
201 | "current": {},
202 | "datasource": "Cinnamon Prometheus",
203 | "hide": 0,
204 | "includeAll": true,
205 | "allValue": ".*",
206 | "multi": true,
207 | "name": "Regions",
208 | "options": [],
209 | "query": "label_values(shard_region_shard_count_value{host=~\"$Servers\", application=~\"$Applications\", self_node=~\"$Nodes\"}, shard_region)",
210 | "refresh": 2,
211 | "regex": "",
212 | "tagValuesQuery": "",
213 | "tagsQuery": "name",
214 | "type": "query",
215 | "useTags": false
216 | }
217 | ]
218 | },
219 | "annotations": {
220 | "list": []
221 | },
222 | "refresh": false,
223 | "schemaVersion": 12,
224 | "version": 3,
225 | "links": [],
226 | "gnetId": null
227 | }
--------------------------------------------------------------------------------
/src/main/scala/com/lightbend/artifactstate/endpoint/ArtifactStateRoutes.scala:
--------------------------------------------------------------------------------
1 | package com.lightbend.artifactstate.endpoint
2 |
3 | import akka.actor.typed.scaladsl.AskPattern._
4 | import akka.actor.typed.{ActorRef, ActorSystem, Scheduler}
5 | import akka.cluster.sharding.typed.ShardingEnvelope
6 | import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
7 | import akka.http.scaladsl.server.Directives._
8 | import akka.http.scaladsl.server.directives.MethodDirectives.{get, post}
9 | import akka.http.scaladsl.server.directives.RouteDirectives.complete
10 | import akka.http.scaladsl.server.{ExceptionHandler, Route}
11 | import akka.util.Timeout
12 | import com.lightbend.artifactstate.actors.ArtifactStateEntityActor._
13 | import com.lightbend.artifactstate.endpoint.ArtifactStatePocAPI._
14 |
15 | import scala.concurrent.{ExecutionContextExecutor, Future}
16 | import scala.language.postfixOps
17 |
18 | class ArtifactStateRoutes(system: ActorSystem[Nothing], psCommandActor: ActorRef[ShardingEnvelope[ArtifactCommand]]) {
19 |
20 | import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
21 | import JsonFormats._
22 |
23 | // If the ask takes longer than this to complete, the request is failed
24 | private implicit val timeout: Timeout = Timeout.create(system.settings.config.getDuration("app.routes.ask-timeout"))
25 | private implicit val ec: ExecutionContextExecutor = system.executionContext
26 |
27 | implicit val scheduler: Scheduler = system.scheduler
28 |
29 | def handleResponse(req: ArtifactAndUser, f: Future[ArtifactResponse]): Future[ExtResponse] = {
30 | f.map {
31 | case ArtifactReadByUser(artifactRead) =>
32 | ExtResponse(req.artifactId, req.userId, Some(artifactRead), None)
33 | case ArtifactInUserFeed(artifactInUserFeed) =>
34 | ExtResponse(req.artifactId, req.userId, Some(artifactInUserFeed), None)
35 | case _ =>
36 | ExtResponse(req.artifactId, req.userId, None, Some("Internal Query Error: this shouldn't happen."))
37 | }
38 | }
39 |
40 | def queryArtifactRead(req: ArtifactAndUser): Future[ExtResponse] = {
41 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
42 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), IsArtifactReadByUser(ref, req.artifactId, req.userId))
43 | }
44 | handleResponse(req, result)
45 | }
46 |
47 | def queryArtifactInUserFeed(req: ArtifactAndUser): Future[ExtResponse] = {
48 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
49 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), IsArtifactInUserFeed(ref, req.artifactId, req.userId))
50 | }
51 | handleResponse(req, result)
52 | }
53 |
54 | def queryAllStates(req: ArtifactAndUser): Future[AllStatesResponse] = {
55 | val f = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
56 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), GetAllStates(ref, req.artifactId, req.userId))
57 | }
58 | f.map {
59 | case AllStates(artifactRead, artifactInUserFeed) =>
60 | AllStatesResponse(req.artifactId, req.userId, Some(artifactRead), Some(artifactInUserFeed), None)
61 | case _ =>
62 | AllStatesResponse(req.artifactId, req.userId, None, None, Some("Internal Error: this shouldn't happen."))
63 | }.recover {
64 | case ex: Exception =>
65 | system.log.error(ex.getMessage, ex)
66 | AllStatesResponse(req.artifactId, req.userId, None, None, Some(ex.getMessage))
67 | }
68 | }
69 |
70 | def handleCmdResponse(req: ArtifactAndUser, f: Future[ArtifactResponse]): Future[CommandResponse] = {
71 | f.map {
72 | case Okay(_) => CommandResponse(true)
73 | case _ =>
74 | system.log.error("Internal Command Error: this shouldn't happen.")
75 | CommandResponse(false)
76 | }.recover {
77 | case ex: Exception =>
78 | system.log.error(s"failure on request user: ${req.userId} artifact id: ${req.artifactId} ${ex.getMessage}", ex)
79 | CommandResponse(false)
80 | }
81 | }
82 |
83 | def cmdArtifactRead(req: ArtifactAndUser): Future[CommandResponse] = {
84 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
85 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), SetArtifactRead(ref, req.artifactId, req.userId))
86 | }
87 | handleCmdResponse(req, result)
88 | }
89 |
90 | def cmdArtifactAddedToUserFeed(req: ArtifactAndUser): Future[CommandResponse] = {
91 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
92 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), SetArtifactAddedToUserFeed(ref, req.artifactId, req.userId))
93 | }
94 | handleCmdResponse(req, result)
95 | }
96 |
97 | def cmdArtifactRemovedFromUserFeed(req: ArtifactAndUser): Future[CommandResponse] = {
98 | val result = psCommandActor.ask { ref : ActorRef[ArtifactResponse] =>
99 | ShardingEnvelope("%d%s".format(req.artifactId, req.userId), SetArtifactRemovedFromUserFeed(ref, req.artifactId, req.userId))
100 | }
101 | handleCmdResponse(req, result)
102 | }
103 |
104 | implicit def myExceptionHandler: ExceptionHandler =
105 | ExceptionHandler {
106 | case ex: Exception =>
107 | extractUri { uri =>
108 | val msg = s"Request to $uri could not be handled normally: Exception: ${ex.getCause} : ${ex.getMessage}"
109 | system.log.error(msg)
110 | complete(HttpResponse(StatusCodes.InternalServerError, entity = msg))
111 | }
112 | }
113 |
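  | // REST routes: queries accept GET (query parameters) or POST (JSON body); commands are POST-only.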
114 | lazy val psRoutes: Route =
115 | pathPrefix("artifactState") {
116 | concat(
117 | // QUERIES:
118 | pathPrefix("isArtifactReadByUser") {
119 | concat(
120 | get {
121 | parameters("artifactId".as[Long], "userId") { (artifactId, userId) =>
122 | complete {
123 | queryArtifactRead(ArtifactAndUser(artifactId, userId))
124 | }
125 | }
126 | },
127 | post {
128 | entity(as[ArtifactAndUser]) { req =>
129 | complete(StatusCodes.OK, queryArtifactRead(req))
130 | }
131 | })
132 | },
133 | pathPrefix("isArtifactInUserFeed") {
134 | concat(
135 | get {
136 | parameters("artifactId".as[Long], "userId") { (artifactId, userId) =>
137 | val req = ArtifactAndUser(artifactId, userId)
138 | complete(queryArtifactInUserFeed(req))
139 | }
140 | },
141 | post {
142 | entity(as[ArtifactAndUser]) { req =>
143 | complete(StatusCodes.OK, queryArtifactInUserFeed(req))
144 | }
145 | })
146 | },
147 | pathPrefix("getAllStates") {
148 | concat(
149 | get {
150 | parameters("artifactId".as[Long], "userId") { (artifactId, userId) =>
151 | val req = ArtifactAndUser(artifactId, userId)
152 | complete(queryAllStates(req))
153 | }
154 | },
155 | post {
156 | entity(as[ArtifactAndUser]) { req =>
157 | complete(StatusCodes.OK, queryAllStates(req))
158 | }
159 | })
160 | },
161 |
162 | // COMMANDS:
163 | pathPrefix("setArtifactReadByUser") {
164 | post {
165 | entity(as[ArtifactAndUser]) { req =>
166 | complete {
167 | cmdArtifactRead(req)
168 | }
169 | }
170 | }
171 | },
172 | pathPrefix("setArtifactAddedToUserFeed") {
173 | post {
174 | entity(as[ArtifactAndUser]) { req =>
175 | complete {
176 | cmdArtifactAddedToUserFeed(req)
177 | }
178 | }
179 | }
180 | },
181 | pathPrefix("setArtifactRemovedFromUserFeed") {
182 | post {
183 | entity(as[ArtifactAndUser]) { req =>
184 | complete {
185 | cmdArtifactRemovedFromUserFeed(req)
186 | }
187 | }
188 | }
189 | })
190 | }
191 |
192 | }
193 |
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-routers.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Akka Routers",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Routed messages: 1min rate",
20 | "description": "",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": "messages/second"
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "ops",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "irate(akka_message_router_routed_messages{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", dispatcher=~\"$Dispatchers\", router=~\"$Routers\", message=~\"$Messages\"}[1m])",
60 | "format": "time_series",
61 | "legendFormat": "{{host}} / {{dispatcher}} / {{router}} / {{message}}"
62 | }
63 | ]
64 | },
65 | {
66 | "datasource": "Cinnamon Prometheus",
67 | "title": "Router processing time ($Quantile quantile)",
68 | "description": "",
69 | "type": "timeseries",
70 | "id": 2,
71 | "interval": "10s",
72 | "fieldConfig": {
73 | "defaults": {
74 | "custom": {
75 | "drawStyle": "line",
76 | "lineInterpolation": "linear",
77 | "lineWidth": 2,
78 | "fillOpacity": 10,
79 | "gradientMode": "none",
80 | "showPoints": "never",
81 | "stacking": {
82 | "mode": "none"
83 | },
84 | "axisPlacement": "auto",
85 | "axisLabel": null
86 | },
87 | "color": {
88 | "mode": "palette-classic"
89 | },
90 | "unit": "ns",
91 | "min": 0,
92 | "max": null
93 | }
94 | },
95 | "options": {
96 | "legend": {
97 | "showLegend": false
98 | },
99 | "tooltip": {
100 | "mode": "multi",
101 | "sort": "desc"
102 | }
103 | },
104 | "span": 6,
105 | "targets": [
106 | {
107 | "expr": "akka_message_router_processing_time_ns{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", dispatcher=~\"$Dispatchers\", router=~\"$Routers\", message=~\"$Messages\", quantile=~\"$Quantile\"}",
108 | "format": "time_series",
109 | "legendFormat": "{{host}} / {{dispatcher}} / {{router}} / {{message}}"
110 | }
111 | ]
112 | }
113 | ]
114 | }
115 | ],
116 | "time": {
117 | "from": "now-15m",
118 | "to": "now"
119 | },
120 | "timepicker": {
121 | "refresh_intervals": [
122 | "5s",
123 | "10s",
124 | "30s",
125 | "1m",
126 | "5m",
127 | "15m",
128 | "30m",
129 | "1h",
130 | "2h",
131 | "1d"
132 | ],
133 | "time_options": [
134 | "5m",
135 | "15m",
136 | "1h",
137 | "6h",
138 | "12h",
139 | "24h",
140 | "2d",
141 | "7d",
142 | "30d"
143 | ]
144 | },
145 | "templating": {
146 | "list": [
147 | {
148 | "current": {},
149 | "datasource": "Cinnamon Prometheus",
150 | "hide": 0,
151 | "includeAll": true,
152 | "allValue": ".*",
153 | "multi": true,
154 | "name": "Applications",
155 | "options": [],
156 | "query": "label_values(application)",
157 | "refresh": 2,
158 | "regex": "",
159 | "tagValuesQuery": "",
160 | "tagsQuery": "name",
161 | "type": "query",
162 | "useTags": false
163 | },
164 | {
165 | "current": {},
166 | "datasource": "Cinnamon Prometheus",
167 | "hide": 0,
168 | "includeAll": true,
169 | "allValue": ".*",
170 | "multi": true,
171 | "name": "Servers",
172 | "options": [],
173 | "query": "label_values(akka_router_router_routed_messages{application=~\"$Applications\"}, host)",
174 | "refresh": 2,
175 | "regex": "",
176 | "tagValuesQuery": "",
177 | "tagsQuery": "name",
178 | "type": "query",
179 | "useTags": false
180 | },
181 | {
182 | "current": {},
183 | "datasource": "Cinnamon Prometheus",
184 | "hide": 0,
185 | "includeAll": true,
186 | "allValue": ".*",
187 | "multi": true,
188 | "name": "ActorSystems",
189 | "options": [],
190 | "query": "label_values(akka_router_router_routed_messages{host=~\"$Servers\", application=~\"$Applications\"}, actor_system)",
191 | "refresh": 2,
192 | "regex": "",
193 | "tagValuesQuery": "",
194 | "tagsQuery": "name",
195 | "type": "query",
196 | "useTags": false
197 | },
198 | {
199 | "current": {},
200 | "datasource": "Cinnamon Prometheus",
201 | "hide": 0,
202 | "includeAll": true,
203 | "allValue": ".*",
204 | "multi": true,
205 | "name": "Dispatchers",
206 | "options": [],
207 | "query": "label_values(akka_router_router_routed_messages{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\"}, dispatcher)",
208 | "refresh": 2,
209 | "regex": "",
210 | "tagValuesQuery": "",
211 | "tagsQuery": "name",
212 | "type": "query",
213 | "useTags": false
214 | },
215 | {
216 | "current": {},
217 | "datasource": "Cinnamon Prometheus",
218 | "hide": 0,
219 | "includeAll": true,
220 | "allValue": ".*",
221 | "multi": true,
222 | "name": "Routers",
223 | "options": [],
224 | "query": "label_values(akka_router_router_routed_messages{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", dispatcher=~\"$Dispatchers\"}, router)",
225 | "refresh": 2,
226 | "regex": "",
227 | "tagValuesQuery": "",
228 | "tagsQuery": "name",
229 | "type": "query",
230 | "useTags": false
231 | },
232 | {
233 | "current": {},
234 | "datasource": "Cinnamon Prometheus",
235 | "hide": 0,
236 | "includeAll": true,
237 | "allValue": ".*",
238 | "multi": true,
239 | "name": "Messages",
240 | "options": [],
241 | "query": "label_values(akka_message_router_routed_messages{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", dispatcher=~\"$Dispatchers\", router=~\"$Routers\"}, message)",
242 | "refresh": 2,
243 | "regex": "",
244 | "tagValuesQuery": "",
245 | "tagsQuery": "name",
246 | "type": "query",
247 | "useTags": false
248 | },
249 | {
250 | "current": {},
251 | "datasource": "Cinnamon Prometheus",
252 | "hide": 0,
253 | "includeAll": false,
254 | "allValue": ".*",
255 | "multi": false,
256 | "name": "Quantile",
257 | "options": [],
258 | "query": "label_values(akka_message_router_processing_time_ns, quantile)",
259 | "refresh": 2,
260 | "regex": "",
261 | "tagValuesQuery": "",
262 | "tagsQuery": "name",
263 | "type": "query",
264 | "useTags": false
265 | }
266 | ]
267 | },
268 | "annotations": {
269 | "list": []
270 | },
271 | "refresh": false,
272 | "schemaVersion": 12,
273 | "version": 3,
274 | "links": [],
275 | "gnetId": null
276 | }
--------------------------------------------------------------------------------
/REF.md:
--------------------------------------------------------------------------------
1 | # Distributed State PoC w/ Akka Typed and Persistent Cluster Sharding
2 |
3 | ## Under Construction - TODO Intro & Article
4 |
5 | ## Lightbend Platform Subscription Required
6 | You need a current [Lightbend Platform Subscription](https://www.lightbend.com/lightbend-platform-subscription) because this project takes advantage of Lightbend Telemetry as well as the Akka Resilience Enhancements:
7 | - [Lightbend Telemetry](https://developer.lightbend.com/docs/telemetry/current/home.html)
8 | - [Split Brain Resolver](https://doc.akka.io/docs/akka-enhancements/current/split-brain-resolver.html)
9 | - [Akka Thread Starvation Detector](https://doc.akka.io/docs/akka-enhancements/current/starvation-detector.html)
10 |
11 |
12 | # How to run and test
13 |
14 | ## To run (locally):
15 | 1. Start the Telemetry (Cinnamon) Elasticsearch Sandbox in Docker:
16 |     - switch to the local Cinnamon Elasticsearch directory
17 |     - issue the command: `docker-compose up`
18 |
19 | 2. Start a local Cassandra instance in Docker:
20 |     `docker-compose -f docker-compose-cassandra.yml up`
21 |
22 | 3. To start the entity cluster (see the configuration note after this list):
23 |     `sbt '; set javaOptions += "-Dconfig.resource=cluster-application.conf" ; run'`
24 | 
25 | 4. To start the HTTP server:
26 |     `sbt '; set javaOptions += "-Dconfig.resource=endpoint-application.conf" ; run'`
27 |
28 | 5. Wait until the cluster issues a "Welcome" to the HTTP server, or until you see `[Up]` for the new node; everything is then ready.
29 |
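Both run commands select their role through the standard Typesafe Config `config.resource` system property. A minimal sketch of how an application picks that setting up at startup (illustrative only; the project's actual entry point is `StartNode.scala`):

```
import com.typesafe.config.{Config, ConfigFactory}

object ConfigResourceExample extends App {
  // ConfigFactory.load() honours the -Dconfig.resource=<name>.conf system property,
  // which is how the sbt commands above switch between cluster and endpoint roles.
  val config: Config = ConfigFactory.load()
  println(config.origin().description()) // shows which resource was actually loaded
}
```
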
30 | ## API Testing basic sanity w/ curl commands:
31 | ### Artifact / User Read
32 | ```
33 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/setArtifactReadByUser
34 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/isArtifactReadByUser
35 | ```
36 | ### Artifact / User Feed
37 | ```
38 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/setArtifactAddedToUserFeed
39 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/isArtifactInUserFeed
40 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/setArtifactRemovedFromUserFeed
41 | ```
42 |
43 | ### Query All States
44 | ```
45 | curl -d '{"artifactId":1, "userId":"Michael"}' -H "Content-Type: application/json" -X POST http://localhost:8082/artifactState/getAllStates
46 | curl 'http://localhost:8082/artifactState/getAllStates?artifactId=1&userId=Michael'
47 | ```
48 |
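The JSON body used throughout these curl examples maps naturally onto a small command case class. A hypothetical spray-json sketch of that shape (the project's real definitions live in `ArtifactStatePocAPI.scala` and `JsonFormats.scala`):

```
import spray.json._
import spray.json.DefaultJsonProtocol._

object ArtifactJsonExample {
  // Hypothetical mirror of the request payload used in the curl examples above.
  final case class ArtifactAndUser(artifactId: Long, userId: String)

  implicit val artifactAndUserFormat: RootJsonFormat[ArtifactAndUser] =
    jsonFormat2(ArtifactAndUser)

  // Round-trip the documented payload.
  val cmd: ArtifactAndUser =
    """{"artifactId":1, "userId":"Michael"}""".parseJson.convertTo[ArtifactAndUser]
}
```
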
49 | ## grpcurl
50 | https://github.com/fullstorydev/grpcurl
51 |
52 | ### Listing Services
53 | ```
54 | grpcurl -plaintext localhost:8082 list
55 | ```
56 |
57 | ### Artifact / User Read
58 | ```
59 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8082 ArtifactStateService/SetArtifactReadByUser
60 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8082 ArtifactStateService/IsArtifactReadByUser
61 | ```
62 |
63 | ### Artifact / User Feed
64 | ```
65 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8082 ArtifactStateService/SetArtifactAddedToUserFeed
66 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8082 ArtifactStateService/IsArtifactInUserFeed
67 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8082 ArtifactStateService/SetArtifactRemovedFromUserFeed
68 | ```
69 |
70 | ### Query All States
71 | ```
72 | grpcurl -plaintext -d '{"artifactId":1, "userId":"Michael"}' localhost:8082 ArtifactStateService/GetAllStates
73 | ```
74 |
75 | ## To test API:
76 | Testing relies on multi-jvm and a running Cassandra instance to exercise the internal cluster API as well as HTTP end-to-end integration (a minimal role sketch follows the command below).
77 |
78 | From within sbt issue the following command:
79 | ```
80 | multi-jvm:test
81 | ```
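
For orientation, a multi-jvm test starts from a `MultiNodeConfig` that names the JVMs taking part. A minimal, hypothetical two-role sketch (the project's real specs live under `src/multi-jvm`):

```
import akka.remote.testkit.MultiNodeConfig

// Hypothetical roles for illustration; each role becomes its own JVM
// when multi-jvm:test runs.
object TwoNodeConfig extends MultiNodeConfig {
  val first = role("first")
  val second = role("second")
}
```
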
82 | ## To run in local Docker
83 | These steps create two Docker images: `akka-typed-state-poc/endpoint` and `akka-typed-state-poc/cluster`. When run, the endpoint is exposed on `localhost:8082`, so all the examples above should work once the cluster has formed.
84 |
85 | Note: this Docker example runs Cassandra in a local container with its default configuration.
86 |
87 | 1. Run sbt in the **project's root** directory.
88 | 2. Build docker images by issuing: `docker:publishLocal`
89 | 3. In a terminal, from the **project's root** directory, issue the command: `docker-compose up`
90 | 4. The `endpoint` should be available at `http://localhost:8082`, just as when running locally.
91 |
92 | ### To access persistent entity event logs in Cassandra's CQLSH
93 | 1. From a terminal window, enter: `docker exec -it akka-typed-persistent-state-poc_cassandra_db_1 sh`
94 | 2. From the command prompt, enter: `cqlsh`
95 | 3. In CQLSH, enter: `use akka;`
96 | 4. To see the layout of the messages table, enter: `describe messages;`
97 | 5. To dump message events, enter: `select * from messages;`
98 |
99 | ## To run in Minishift / Openshift
100 | TODO
101 |
102 | # Pros & Cons of this approach vs traditional microservice w/ db persistence
103 |
104 | Following is a breakdown of the pros and cons of two approaches to solving the problem of building a distributed cache:
105 |
106 | 1. Akka Cluster Sharding w/ Persistent Entities
107 | 2. Microservice w/ database persistence (without Actor system)
108 |
109 | ## Akka Cluster Sharding w/ Persistent Entities
110 | Lightbend believes in its Akka framework for building reactive microservices that fulfill the promise of the [Reactive Manifesto](https://www.reactivemanifesto.org/). Reactive microservices have the following qualities: responsive, resilient, elastic, and message driven.
111 |
112 | [Akka Cluster](https://doc.akka.io/docs/akka/current/index-cluster.html) provides a fault-tolerant decentralized peer-to-peer based cluster [membership](https://doc.akka.io/docs/akka/current/common/cluster.html#membership) service with no single point of failure or single point of bottleneck. It does this using [gossip](https://doc.akka.io/docs/akka/current/common/cluster.html#gossip) protocols and an automatic [failure detector](https://doc.akka.io/docs/akka/current/common/cluster.html#failure-detector).
113 |
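As a concrete illustration of consuming that membership service, here is a minimal sketch of a typed actor subscribing to cluster membership events, in the spirit of this project's `ClusterListenerActor` (simplified; not the actual implementation):

```
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.ClusterEvent.{MemberEvent, MemberUp}
import akka.cluster.typed.{Cluster, Subscribe}

object SimpleClusterListener {
  def apply(): Behavior[MemberEvent] =
    Behaviors.setup { ctx =>
      // Register for membership events gossiped by the cluster.
      Cluster(ctx.system).subscriptions ! Subscribe(ctx.self, classOf[MemberEvent])
      Behaviors.receiveMessage {
        case MemberUp(member) =>
          ctx.log.info("Member is Up: {}", member.address)
          Behaviors.same
        case other =>
          ctx.log.info("Cluster event: {}", other)
          Behaviors.same
      }
    }
}
```
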
114 | Akka's Cluster allows for building distributed applications, where one application or service spans multiple nodes (in practice multiple ActorSystems). See also the discussion in [When and where to use Akka Cluster](https://doc.akka.io/docs/akka/current/cluster-usage.html#when-and-where-to-use-akka-cluster).
115 |
116 | Akka's [Cluster Sharding](https://doc.akka.io/docs/akka/current/cluster-sharding.html) leverages the features of Akka Cluster for distributed computing while enabling simple-to-code persistent entities through CQRS / ES. Cluster sharding is useful when you need to distribute actors across several nodes in the cluster and want to interact with them using their logical identifier, without having to care about their physical location in the cluster, which may also change over time (see the sketch below).
117 |
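A minimal sketch of that interaction with Akka Typed Cluster Sharding; the entity protocol here is hypothetical and far simpler than the project's `ArtifactStateEntityActor`:

```
import akka.actor.typed.{ActorSystem, Behavior}
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity, EntityTypeKey}

object ShardedArtifactExample {
  // Hypothetical, simplified protocol for illustration.
  sealed trait Command
  final case class SetArtifactReadByUser(userId: String) extends Command

  val TypeKey: EntityTypeKey[Command] = EntityTypeKey[Command]("ArtifactState")

  private def entity(entityId: String): Behavior[Command] =
    Behaviors.receiveMessage { case SetArtifactReadByUser(userId) =>
      // A persistent entity would journal an event here (CQRS / ES).
      Behaviors.same
    }

  def demo(system: ActorSystem[_]): Unit = {
    val sharding = ClusterSharding(system)
    // Register the entity type once per node; sharding distributes the instances.
    sharding.init(Entity(TypeKey)(entityContext => entity(entityContext.entityId)))
    // Address an entity purely by logical id; its node placement is transparent.
    sharding.entityRefFor(TypeKey, "artifact-1") ! SetArtifactReadByUser("Michael")
  }
}
```
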
118 | ### Pros
119 | - easy to code and maintain
120 |
121 | Actors provide:
122 | - easy scaling
123 | - fault tolerance
124 | - geographical distribution
125 | - no shared state
126 |
127 | Akka cluster / sharding provides:
128 | - no single point of failure
129 | - scalability for distribution and persistence of state
130 | - easy projection of events over time to other systems, for use cases such as metrics collection
131 |
132 | ### Cons
133 | - introduces new concepts that may not be well known
134 |
135 | Actors can be:
136 | - susceptible to overflowing mailboxes
137 |
138 | Akka cluster / sharding:
139 | - can be difficult for DevOps to deploy
140 | - requires a complete stop / restart when changing the sharding configuration
141 |
142 | ## Microservice w/ database persistence (without Actor system)
143 |
144 | Building a distributed, highly scalable cache with persistent backing and recovery is difficult.
145 |
146 | ### Pros
147 | - uses concepts that are probably well known
148 |
149 | ### Cons
150 | - hard to scale while distributing state
151 | - potential for bottlenecks if not distributed
152 | - code must be created to save state
153 | - code must be created to recover state
154 |
155 | # Your Akka Cluster on AWS / ECS
156 | For deployment to an AWS / ECS production environment, we recommend [Akka Bootstrap](https://developer.lightbend.com/docs/akka-management/current/bootstrap.html).
157 | For an example of the configuration required to run this PoC on AWS / ECS using Akka Bootstrap, see this [GitHub repository](https://github.com/akka/akka-management/tree/master/bootstrap-joining-demo/aws-api-ecs).
158 |
159 | Warning: if you're extending **application.conf**, make sure your new configuration file sets **akka.cluster.seed-nodes** to **null**, as this setting conflicts with Akka Bootstrap. If your configuration lives entirely in code, then **akka.cluster.seed-nodes** should not be set at all.
160 |
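For example, a HOCON override reflecting that warning might look like this (illustrative fragment only):

```
# application.conf extension: clear any seed nodes so Akka Bootstrap
# alone controls cluster formation, as the warning above describes.
akka.cluster.seed-nodes = null
```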
169 |
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-http-endpoints.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Akka HTTP Endpoints",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Response rate: $Methods $Endpoints",
20 | "description": "",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": "ops/second"
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "ops",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "irate(akka_http_request_path_endpoint_responses{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", http_server=~\"$HttpServers\", request_method=~\"$Methods\", request_path=~\"$Endpoints\", http_server=~\".*:$HttpPorts\"}[1m])",
60 | "format": "time_series",
61 | "legendFormat": "{{application}} / {{host}} / {{http_server}} / {{request_method}}"
62 | }
63 | ]
64 | },
65 | {
66 | "datasource": "Cinnamon Prometheus",
67 | "title": "Response time: $Methods $Endpoints ($Quantile quantile)",
68 | "description": "",
69 | "type": "timeseries",
70 | "id": 2,
71 | "interval": "10s",
72 | "fieldConfig": {
73 | "defaults": {
74 | "custom": {
75 | "drawStyle": "line",
76 | "lineInterpolation": "linear",
77 | "lineWidth": 2,
78 | "fillOpacity": 10,
79 | "gradientMode": "none",
80 | "showPoints": "never",
81 | "stacking": {
82 | "mode": "none"
83 | },
84 | "axisPlacement": "auto",
85 | "axisLabel": null
86 | },
87 | "color": {
88 | "mode": "palette-classic"
89 | },
90 | "unit": "ns",
91 | "min": 0,
92 | "max": null
93 | }
94 | },
95 | "options": {
96 | "legend": {
97 | "showLegend": false
98 | },
99 | "tooltip": {
100 | "mode": "multi",
101 | "sort": "desc"
102 | }
103 | },
104 | "span": 6,
105 | "targets": [
106 | {
107 | "expr": "akka_http_request_path_endpoint_response_time_ns{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", http_server=~\"$HttpServers\", request_method=~\"$Methods\", request_path=~\"$Endpoints\", quantile=~\"$Quantile\", http_server=~\".*:$HttpPorts\"}",
108 | "format": "time_series",
109 | "legendFormat": "{{application}} / {{host}} / {{http_server}} / {{request_method}}"
110 | }
111 | ]
112 | }
113 | ],
114 | "repeat": "Endpoints",
115 | "title": "$Endpoints"
116 | }
117 | ],
118 | "time": {
119 | "from": "now-15m",
120 | "to": "now"
121 | },
122 | "timepicker": {
123 | "refresh_intervals": [
124 | "5s",
125 | "10s",
126 | "30s",
127 | "1m",
128 | "5m",
129 | "15m",
130 | "30m",
131 | "1h",
132 | "2h",
133 | "1d"
134 | ],
135 | "time_options": [
136 | "5m",
137 | "15m",
138 | "1h",
139 | "6h",
140 | "12h",
141 | "24h",
142 | "2d",
143 | "7d",
144 | "30d"
145 | ]
146 | },
147 | "templating": {
148 | "list": [
149 | {
150 | "current": {},
151 | "datasource": "Cinnamon Prometheus",
152 | "hide": 0,
153 | "includeAll": true,
154 | "allValue": ".*",
155 | "multi": true,
156 | "name": "Applications",
157 | "options": [],
158 | "query": "label_values(application)",
159 | "refresh": 2,
160 | "regex": "",
161 | "tagValuesQuery": "",
162 | "tagsQuery": "name",
163 | "type": "query",
164 | "useTags": false
165 | },
166 | {
167 | "current": {},
168 | "datasource": "Cinnamon Prometheus",
169 | "hide": 0,
170 | "includeAll": true,
171 | "allValue": ".*",
172 | "multi": true,
173 | "name": "Servers",
174 | "options": [],
175 | "query": "label_values(akka_http_http_server_connections{application=~\"$Applications\"}, host)",
176 | "refresh": 2,
177 | "regex": "",
178 | "tagValuesQuery": "",
179 | "tagsQuery": "name",
180 | "type": "query",
181 | "useTags": false
182 | },
183 | {
184 | "current": {},
185 | "datasource": "Cinnamon Prometheus",
186 | "hide": 0,
187 | "includeAll": true,
188 | "allValue": ".*",
189 | "multi": true,
190 | "name": "ActorSystems",
191 | "options": [],
192 | "query": "label_values(akka_http_http_server_connections{host=~\"$Servers\", application=~\"$Applications\"}, actor_system)",
193 | "refresh": 2,
194 | "regex": "",
195 | "tagValuesQuery": "",
196 | "tagsQuery": "name",
197 | "type": "query",
198 | "useTags": false
199 | },
200 | {
201 | "current": {},
202 | "datasource": "Cinnamon Prometheus",
203 | "hide": 0,
204 | "includeAll": true,
205 | "allValue": ".*",
206 | "multi": true,
207 | "name": "HttpServers",
208 | "options": [],
209 | "query": "label_values(akka_http_http_server_connections{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\"}, http_server)",
210 | "refresh": 2,
211 | "regex": "",
212 | "tagValuesQuery": "",
213 | "tagsQuery": "name",
214 | "type": "query",
215 | "useTags": false
216 | },
217 | {
218 | "current": {},
219 | "datasource": "Cinnamon Prometheus",
220 | "hide": 0,
221 | "includeAll": true,
222 | "allValue": ".*",
223 | "multi": true,
224 | "name": "HttpPorts",
225 | "options": [],
226 | "query": "label_values(akka_http_request_path_endpoint_responses{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\"}, http_server)",
227 | "refresh": 2,
228 | "regex": ".*:(.*)",
229 | "tagValuesQuery": "",
230 | "tagsQuery": "name",
231 | "type": "query",
232 | "useTags": false
233 | },
234 | {
235 | "current": {},
236 | "datasource": "Cinnamon Prometheus",
237 | "hide": 0,
238 | "includeAll": true,
239 | "allValue": ".*",
240 | "multi": true,
241 | "name": "Methods",
242 | "options": [],
243 | "query": "label_values(akka_http_request_path_endpoint_responses{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", http_server=~\"$HttpServers\"}, request_method)",
244 | "refresh": 2,
245 | "regex": "",
246 | "tagValuesQuery": "",
247 | "tagsQuery": "name",
248 | "type": "query",
249 | "useTags": false
250 | },
251 | {
252 | "current": {},
253 | "datasource": "Cinnamon Prometheus",
254 | "hide": 0,
255 | "includeAll": true,
256 | "allValue": ".*",
257 | "multi": true,
258 | "name": "Endpoints",
259 | "options": [],
260 | "query": "label_values(akka_http_request_path_endpoint_responses{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", http_server=~\"$HttpServers\", request_method=~\"$Methods\"}, request_path)",
261 | "refresh": 2,
262 | "regex": "",
263 | "tagValuesQuery": "",
264 | "tagsQuery": "name",
265 | "type": "query",
266 | "useTags": false
267 | },
268 | {
269 | "current": {},
270 | "datasource": "Cinnamon Prometheus",
271 | "hide": 0,
272 | "includeAll": false,
273 | "allValue": ".*",
274 | "multi": false,
275 | "name": "Quantile",
276 | "options": [],
277 | "query": "label_values(akka_http_request_path_endpoint_response_time_ns{host=~\"$Servers\", application=~\"$Applications\"}, quantile)",
278 | "refresh": 2,
279 | "regex": "",
280 | "tagValuesQuery": "",
281 | "tagsQuery": "name",
282 | "type": "query",
283 | "useTags": false
284 | }
285 | ]
286 | },
287 | "annotations": {
288 | "list": []
289 | },
290 | "refresh": false,
291 | "schemaVersion": 12,
292 | "version": 3,
293 | "links": [],
294 | "gnetId": null
295 | }
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-stopwatches.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Stopwatch Metrics",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Stopwatch Time ($Quantile quantile)",
20 | "description": "",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": null
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "ns",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "stopwatch_stopwatch_time_ns{host=~\"$Servers\", application=~\"$Applications\", stopwatch=~\"$StopwatchIdentifiers\", quantile=~\"$Quantile\"}",
60 | "format": "time_series",
61 | "legendFormat": "{{application}} / {{host}}"
62 | }
63 | ]
64 | },
65 | {
66 | "datasource": "Cinnamon Prometheus",
67 | "title": "Stopwatch Stops: 1min rate",
68 | "description": "",
69 | "type": "timeseries",
70 | "id": 2,
71 | "interval": "10s",
72 | "fieldConfig": {
73 | "defaults": {
74 | "custom": {
75 | "drawStyle": "line",
76 | "lineInterpolation": "linear",
77 | "lineWidth": 2,
78 | "fillOpacity": 10,
79 | "gradientMode": "none",
80 | "showPoints": "never",
81 | "stacking": {
82 | "mode": "none"
83 | },
84 | "axisPlacement": "auto",
85 | "axisLabel": "messages/second"
86 | },
87 | "color": {
88 | "mode": "palette-classic"
89 | },
90 | "unit": "ops",
91 | "min": 0,
92 | "max": null
93 | }
94 | },
95 | "options": {
96 | "legend": {
97 | "showLegend": false
98 | },
99 | "tooltip": {
100 | "mode": "multi",
101 | "sort": "desc"
102 | }
103 | },
104 | "span": 6,
105 | "targets": [
106 | {
107 | "expr": "irate(stopwatch_stopwatch_rate{host=~\"$Servers\", application=~\"$Applications\", stopwatch=~\"$StopwatchIdentifiers\"}[1m])",
108 | "format": "time_series",
109 | "legendFormat": "{{application}} / {{host}}"
110 | }
111 | ]
112 | }
113 | ]
114 | },
115 | {
116 | "title": "",
117 | "collapse": false,
118 | "editable": true,
119 | "height": "250px",
120 | "panels": [
121 | {
122 | "datasource": "Cinnamon Prometheus",
123 | "title": "Stopwatch Active Time ($Quantile quantile)",
124 | "description": "",
125 | "type": "timeseries",
126 | "id": 3,
127 | "interval": "10s",
128 | "fieldConfig": {
129 | "defaults": {
130 | "custom": {
131 | "drawStyle": "line",
132 | "lineInterpolation": "linear",
133 | "lineWidth": 2,
134 | "fillOpacity": 10,
135 | "gradientMode": "none",
136 | "showPoints": "never",
137 | "stacking": {
138 | "mode": "none"
139 | },
140 | "axisPlacement": "auto",
141 | "axisLabel": null
142 | },
143 | "color": {
144 | "mode": "palette-classic"
145 | },
146 | "unit": "ns",
147 | "min": 0,
148 | "max": null
149 | }
150 | },
151 | "options": {
152 | "legend": {
153 | "showLegend": false
154 | },
155 | "tooltip": {
156 | "mode": "multi",
157 | "sort": "desc"
158 | }
159 | },
160 | "span": 6,
161 | "targets": [
162 | {
163 | "expr": "stopwatch_stopwatch_active_time_ns{host=~\"$Servers\", application=~\"$Applications\", stopwatch=~\"$StopwatchIdentifiers\", quantile=~\"$Quantile\"}",
164 | "format": "time_series",
165 | "legendFormat": "{{application}} / {{host}}"
166 | }
167 | ]
168 | },
169 | {
170 | "datasource": "Cinnamon Prometheus",
171 | "title": "Stopwatch Threshold Breaches: 1min rate",
172 | "description": "",
173 | "type": "timeseries",
174 | "id": 4,
175 | "interval": "10s",
176 | "fieldConfig": {
177 | "defaults": {
178 | "custom": {
179 | "drawStyle": "line",
180 | "lineInterpolation": "linear",
181 | "lineWidth": 2,
182 | "fillOpacity": 10,
183 | "gradientMode": "none",
184 | "showPoints": "never",
185 | "stacking": {
186 | "mode": "none"
187 | },
188 | "axisPlacement": "auto",
189 | "axisLabel": "messages/second"
190 | },
191 | "color": {
192 | "mode": "palette-classic"
193 | },
194 | "unit": "ops",
195 | "min": 0,
196 | "max": null
197 | }
198 | },
199 | "options": {
200 | "legend": {
201 | "showLegend": false
202 | },
203 | "tooltip": {
204 | "mode": "multi",
205 | "sort": "desc"
206 | }
207 | },
208 | "span": 6,
209 | "targets": [
210 | {
211 | "expr": "irate(stopwatch_stopwatch_time_limit{host=~\"$Servers\", application=~\"$Applications\", stopwatch=~\"$StopwatchIdentifiers\"}[1m])",
212 | "format": "time_series",
213 | "legendFormat": "{{application}} / {{host}}"
214 | }
215 | ]
216 | }
217 | ]
218 | }
219 | ],
220 | "time": {
221 | "from": "now-15m",
222 | "to": "now"
223 | },
224 | "timepicker": {
225 | "refresh_intervals": [
226 | "5s",
227 | "10s",
228 | "30s",
229 | "1m",
230 | "5m",
231 | "15m",
232 | "30m",
233 | "1h",
234 | "2h",
235 | "1d"
236 | ],
237 | "time_options": [
238 | "5m",
239 | "15m",
240 | "1h",
241 | "6h",
242 | "12h",
243 | "24h",
244 | "2d",
245 | "7d",
246 | "30d"
247 | ]
248 | },
249 | "templating": {
250 | "list": [
251 | {
252 | "current": {},
253 | "datasource": "Cinnamon Prometheus",
254 | "hide": 0,
255 | "includeAll": true,
256 | "allValue": ".*",
257 | "multi": true,
258 | "name": "Applications",
259 | "options": [],
260 | "query": "label_values(application)",
261 | "refresh": 2,
262 | "regex": "",
263 | "tagValuesQuery": "",
264 | "tagsQuery": "name",
265 | "type": "query",
266 | "useTags": false
267 | },
268 | {
269 | "current": {},
270 | "datasource": "Cinnamon Prometheus",
271 | "hide": 0,
272 | "includeAll": true,
273 | "allValue": ".*",
274 | "multi": true,
275 | "name": "Servers",
276 | "options": [],
277 | "query": "label_values(akka_actor_running_actors{application=~\"$Applications\"}, host)",
278 | "refresh": 2,
279 | "regex": "",
280 | "tagValuesQuery": "",
281 | "tagsQuery": "name",
282 | "type": "query",
283 | "useTags": false
284 | },
285 | {
286 | "current": {},
287 | "datasource": "Cinnamon Prometheus",
288 | "hide": 0,
289 | "includeAll": true,
290 | "allValue": ".*",
291 | "multi": true,
292 | "name": "StopwatchIdentifiers",
293 | "options": [],
294 | "query": "label_values(stopwatch_stopwatch_rate{host=~\"$Servers\", application=~\"$Applications\"}, stopwatch)",
295 | "refresh": 2,
296 | "regex": "",
297 | "tagValuesQuery": "",
298 | "tagsQuery": "name",
299 | "type": "query",
300 | "useTags": false
301 | },
302 | {
303 | "current": {},
304 | "datasource": "Cinnamon Prometheus",
305 | "hide": 0,
306 | "includeAll": false,
307 | "allValue": ".*",
308 | "multi": false,
309 | "name": "Quantile",
310 | "options": [],
311 | "query": "label_values(stopwatch_stopwatch_time_ns, quantile)",
312 | "refresh": 2,
313 | "regex": "",
314 | "tagValuesQuery": "",
315 | "tagsQuery": "name",
316 | "type": "query",
317 | "useTags": false
318 | }
319 | ]
320 | },
321 | "annotations": {
322 | "list": []
323 | },
324 | "refresh": false,
325 | "schemaVersion": 12,
326 | "version": 3,
327 | "links": [],
328 | "gnetId": null
329 | }
--------------------------------------------------------------------------------
/telemetry/akka-dashboards/akka-http-clients.json:
--------------------------------------------------------------------------------
1 | {
2 | "id": null,
3 | "title": "Akka HTTP Clients",
4 | "tags": [],
5 | "style": "dark",
6 | "timezone": "browser",
7 | "editable": true,
8 | "hideControls": true,
9 | "sharedCrosshair": false,
10 | "rows": [
11 | {
12 | "title": "",
13 | "collapse": false,
14 | "editable": true,
15 | "height": "250px",
16 | "panels": [
17 | {
18 | "datasource": "Cinnamon Prometheus",
19 | "title": "Client Pool Connections",
20 | "description": "",
21 | "type": "timeseries",
22 | "id": 1,
23 | "interval": "10s",
24 | "fieldConfig": {
25 | "defaults": {
26 | "custom": {
27 | "drawStyle": "line",
28 | "lineInterpolation": "linear",
29 | "lineWidth": 2,
30 | "fillOpacity": 10,
31 | "gradientMode": "none",
32 | "showPoints": "never",
33 | "stacking": {
34 | "mode": "none"
35 | },
36 | "axisPlacement": "auto",
37 | "axisLabel": null
38 | },
39 | "color": {
40 | "mode": "palette-classic"
41 | },
42 | "unit": "short",
43 | "min": 0,
44 | "max": null
45 | }
46 | },
47 | "options": {
48 | "legend": {
49 | "showLegend": false
50 | },
51 | "tooltip": {
52 | "mode": "multi",
53 | "sort": "desc"
54 | }
55 | },
56 | "span": 6,
57 | "targets": [
58 | {
59 | "expr": "akka_http_client_pool_connections{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", client_pool=~\"$ClientPools\"}",
60 | "format": "time_series",
61 | "legendFormat": "{{host}} / {{client_pool}} / {{http_client}}"
62 | }
63 | ]
64 | }
65 | ]
66 | },
67 | {
68 | "title": "",
69 | "collapse": false,
70 | "editable": true,
71 | "height": "250px",
72 | "panels": [
73 | {
74 | "datasource": "Cinnamon Prometheus",
75 | "title": "$Services request rate: 1 min",
76 | "description": "",
77 | "type": "timeseries",
78 | "id": 2,
79 | "interval": "10s",
80 | "fieldConfig": {
81 | "defaults": {
82 | "custom": {
83 | "drawStyle": "line",
84 | "lineInterpolation": "linear",
85 | "lineWidth": 2,
86 | "fillOpacity": 10,
87 | "gradientMode": "none",
88 | "showPoints": "never",
89 | "stacking": {
90 | "mode": "none"
91 | },
92 | "axisPlacement": "auto",
93 | "axisLabel": "ops/second"
94 | },
95 | "color": {
96 | "mode": "palette-classic"
97 | },
98 | "unit": "ops",
99 | "min": 0,
100 | "max": null
101 | }
102 | },
103 | "options": {
104 | "legend": {
105 | "showLegend": false
106 | },
107 | "tooltip": {
108 | "mode": "multi",
109 | "sort": "desc"
110 | }
111 | },
112 | "span": 6,
113 | "targets": [
114 | {
115 | "expr": "irate(akka_http_http_client_http_client_requests{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", client_pool=~\"$ClientPools\", http_client=~\"$Services\"}[1m])",
116 | "format": "time_series",
117 | "legendFormat": "{{host}} / {{client_pool}} / {{http_client}}"
118 | }
119 | ]
120 | },
121 | {
122 | "datasource": "Cinnamon Prometheus",
123 | "title": "$Services response time ($Quantile quantile)",
124 | "description": "",
125 | "type": "timeseries",
126 | "id": 3,
127 | "interval": "10s",
128 | "fieldConfig": {
129 | "defaults": {
130 | "custom": {
131 | "drawStyle": "line",
132 | "lineInterpolation": "linear",
133 | "lineWidth": 2,
134 | "fillOpacity": 10,
135 | "gradientMode": "none",
136 | "showPoints": "never",
137 | "stacking": {
138 | "mode": "none"
139 | },
140 | "axisPlacement": "auto",
141 | "axisLabel": null
142 | },
143 | "color": {
144 | "mode": "palette-classic"
145 | },
146 | "unit": "ns",
147 | "min": 0,
148 | "max": null
149 | }
150 | },
151 | "options": {
152 | "legend": {
153 | "showLegend": false
154 | },
155 | "tooltip": {
156 | "mode": "multi",
157 | "sort": "desc"
158 | }
159 | },
160 | "span": 6,
161 | "targets": [
162 | {
163 | "expr": "akka_http_http_client_http_client_service_response_time_ns{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\", client_pool=~\"$ClientPools\", http_client=~\"$Services\", quantile=~\"$Quantile\"}",
164 | "format": "time_series",
165 | "legendFormat": "{{host}} / {{client_pool}} / {{http_client}}"
166 | }
167 | ]
168 | }
169 | ],
170 | "repeat": "Services",
171 | "title": "$Services"
172 | }
173 | ],
174 | "time": {
175 | "from": "now-15m",
176 | "to": "now"
177 | },
178 | "timepicker": {
179 | "refresh_intervals": [
180 | "5s",
181 | "10s",
182 | "30s",
183 | "1m",
184 | "5m",
185 | "15m",
186 | "30m",
187 | "1h",
188 | "2h",
189 | "1d"
190 | ],
191 | "time_options": [
192 | "5m",
193 | "15m",
194 | "1h",
195 | "6h",
196 | "12h",
197 | "24h",
198 | "2d",
199 | "7d",
200 | "30d"
201 | ]
202 | },
203 | "templating": {
204 | "list": [
205 | {
206 | "current": {},
207 | "datasource": "Cinnamon Prometheus",
208 | "hide": 0,
209 | "includeAll": true,
210 | "allValue": ".*",
211 | "multi": true,
212 | "name": "Applications",
213 | "options": [],
214 | "query": "label_values(application)",
215 | "refresh": 2,
216 | "regex": "",
217 | "tagValuesQuery": "",
218 | "tagsQuery": "name",
219 | "type": "query",
220 | "useTags": false
221 | },
222 | {
223 | "current": {},
224 | "datasource": "Cinnamon Prometheus",
225 | "hide": 0,
226 | "includeAll": true,
227 | "allValue": ".*",
228 | "multi": true,
229 | "name": "Servers",
230 | "options": [],
231 | "query": "label_values(akka_http_http_client_http_client_requests{application=~\"$Applications\"}, host)",
232 | "refresh": 2,
233 | "regex": "",
234 | "tagValuesQuery": "",
235 | "tagsQuery": "name",
236 | "type": "query",
237 | "useTags": false
238 | },
239 | {
240 | "current": {},
241 | "datasource": "Cinnamon Prometheus",
242 | "hide": 0,
243 | "includeAll": true,
244 | "allValue": ".*",
245 | "multi": true,
246 | "name": "ActorSystems",
247 | "options": [],
248 | "query": "label_values(akka_http_http_client_http_client_requests{host=~\"$Servers\", application=~\"$Applications\"}, actor_system)",
249 | "refresh": 2,
250 | "regex": "",
251 | "tagValuesQuery": "",
252 | "tagsQuery": "name",
253 | "type": "query",
254 | "useTags": false
255 | },
256 | {
257 | "current": {},
258 | "datasource": "Cinnamon Prometheus",
259 | "hide": 0,
260 | "includeAll": true,
261 | "allValue": ".*",
262 | "multi": true,
263 | "name": "ClientPools",
264 | "options": [],
265 | "query": "label_values(akka_http_http_client_http_client_requests{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\"}, client_pool)",
266 | "refresh": 2,
267 | "regex": "",
268 | "tagValuesQuery": "",
269 | "tagsQuery": "name",
270 | "type": "query",
271 | "useTags": false
272 | },
273 | {
274 | "current": {},
275 | "datasource": "Cinnamon Prometheus",
276 | "hide": 0,
277 | "includeAll": true,
278 | "allValue": ".*",
279 | "multi": true,
280 | "name": "Services",
281 | "options": [],
282 | "query": "label_values(akka_http_http_client_http_client_requests{host=~\"$Servers\", application=~\"$Applications\", actor_system=~\"$ActorSystems\"}, http_client)",
283 | "refresh": 2,
284 | "regex": "",
285 | "tagValuesQuery": "",
286 | "tagsQuery": "name",
287 | "type": "query",
288 | "useTags": false
289 | },
290 | {
291 | "current": {},
292 | "datasource": "Cinnamon Prometheus",
293 | "hide": 0,
294 | "includeAll": false,
295 | "allValue": ".*",
296 | "multi": false,
297 | "name": "Quantile",
298 | "options": [],
299 | "query": "label_values(akka_http_http_client_http_client_service_response_time_ns{host=~\"$Servers\", application=~\"$Applications\"}, quantile)",
300 | "refresh": 2,
301 | "regex": "",
302 | "tagValuesQuery": "",
303 | "tagsQuery": "name",
304 | "type": "query",
305 | "useTags": false
306 | }
307 | ]
308 | },
309 | "annotations": {
310 | "list": []
311 | },
312 | "refresh": false,
313 | "schemaVersion": 12,
314 | "version": 3,
315 | "links": [],
316 | "gnetId": null
317 | }
--------------------------------------------------------------------------------