├── .github ├── CODEOWNERS ├── dependabot.yml └── workflows │ └── publish-and-deploy-images.yaml ├── .gitignore ├── LICENSE ├── README.md ├── alloy ├── config.alloy ├── endpoints-cloud.json └── endpoints.json ├── beyla └── config.yaml ├── cloud ├── dashboard-metrics-logs-traces-1.json └── envvars-grafana-cloud-unconfigured.sh ├── docker-compose-cloud.yml ├── docker-compose-otel.yml ├── docker-compose.yml ├── grafana ├── .DS_Store ├── definitions │ ├── k6.json │ ├── mlt-erroring-endpoints.json │ ├── mlt.json │ └── traces-in-dashboards.json └── provisioning │ ├── dashboards │ └── mlt.yaml │ ├── datasources │ └── datasources.yaml │ └── plugins │ └── loki-explorer-app.yaml ├── images └── Introduction to MLTP Arch Diagram.png ├── k6 └── mythical-loadtest.js ├── k8s └── mythical │ ├── mythical-beasts-deployment.yaml │ ├── mythical-beasts-persistentvolumeclaim.yaml │ └── mythical-beasts-service.yaml ├── loki └── loki.yaml ├── mimir └── mimir.yaml ├── otel └── otel.yml ├── source ├── .DS_Store ├── build-source.sh ├── common │ ├── endpoints.js │ ├── logging.js │ ├── queue.js │ └── tracing.js ├── docker │ └── Dockerfile ├── mythical-beasts-frontend │ ├── .gitignore │ ├── Dockerfile │ ├── nginx.conf │ ├── package-lock.json │ ├── package.json │ ├── public │ │ ├── index.html │ │ └── manifest.json │ ├── rsbuild.config.js │ └── src │ │ ├── App.css │ │ ├── App.js │ │ ├── components │ │ ├── BeastManager.css │ │ └── BeastManager.js │ │ ├── faro.js │ │ ├── index.css │ │ ├── index.js │ │ └── services │ │ └── api.js ├── mythical-beasts-recorder │ ├── index.js │ ├── package-lock.json │ └── package.json ├── mythical-beasts-requester │ ├── index.js │ ├── package-lock.json │ └── package.json └── mythical-beasts-server │ ├── index.js │ ├── package-lock.json │ └── package.json └── tempo └── tempo.yaml /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This repo is nominally owned by Grafana FE: 2 | * @grafana/field-engineering 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "docker" 4 | directory: "/source/docker/" 5 | schedule: 6 | interval: "weekly" 7 | open-pull-requests-limit: 10 8 | commit-message: 9 | prefix: "chore(deps): " 10 | - package-ecosystem: "github-actions" 11 | directory: "/" 12 | schedule: 13 | interval: "weekly" 14 | open-pull-requests-limit: 10 15 | commit-message: 16 | prefix: "chore(deps): " 17 | - package-ecosystem: 'npm' 18 | directory: '/source/mythical-beasts-recorder/' 19 | schedule: 20 | interval: 'daily' 21 | open-pull-requests-limit: 10 22 | commit-message: 23 | prefix: "chore(deps): " 24 | - package-ecosystem: 'npm' 25 | directory: '/source/mythical-beasts-requester/' 26 | schedule: 27 | interval: 'daily' 28 | open-pull-requests-limit: 10 29 | commit-message: 30 | prefix: "chore(deps): " 31 | - package-ecosystem: 'npm' 32 | directory: '/source/mythical-beasts-server/' 33 | schedule: 34 | interval: 'daily' 35 | open-pull-requests-limit: 10 36 | commit-message: 37 | prefix: "chore(deps): " -------------------------------------------------------------------------------- /.github/workflows/publish-and-deploy-images.yaml: -------------------------------------------------------------------------------- 1 | name: "Build and publish mythical images" 2 | on: 3 | push: 4 | branches: 5 | - 'main' 6 | paths: 7 | - source/** 8 | 9 | pull_request: 10 
| types: [opened, synchronize] 11 | paths: 12 | - source/** 13 | jobs: 14 | build_and_push_images: 15 | runs-on: ubuntu-latest 16 | permissions: 17 | contents: read 18 | packages: write 19 | id-token: write 20 | env: 21 | REGISTRY_LOCATION: grafana/intro-to-mltp 22 | 23 | strategy: 24 | fail-fast: false 25 | matrix: 26 | file_tag: 27 | - file: source/docker/Dockerfile 28 | tag_suffix: mythical-beasts-requester 29 | context: source 30 | service: mythical-beasts-requester 31 | setup-qemu: true 32 | - file: source/docker/Dockerfile 33 | tag_suffix: mythical-beasts-server 34 | context: source 35 | service: mythical-beasts-server 36 | setup-qemu: true 37 | - file: source/docker/Dockerfile 38 | tag_suffix: mythical-beasts-recorder 39 | context: source 40 | service: mythical-beasts-recorder 41 | setup-qemu: true 42 | - file: source/mythical-beasts-frontend/Dockerfile 43 | tag_suffix: mythical-beasts-frontend 44 | context: source/mythical-beasts-frontend 45 | service: mythical-beasts-frontend 46 | setup-qemu: true 47 | 48 | steps: 49 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 50 | with: 51 | fetch-depth: 0 52 | persist-credentials: false 53 | 54 | - name: DetermineReference 55 | id: get_reference 56 | run: | 57 | if [ -z "$GITHUB_HEAD_REF" ]; then 58 | echo "ref=$GITHUB_REF" >> "$GITHUB_OUTPUT" 59 | else 60 | echo "ref=$GITHUB_HEAD_REF" >> "$GITHUB_OUTPUT" 61 | fi 62 | 63 | - name: Determine image_tag to use 64 | id: image_tag 65 | run: | 66 | SHA=$(git rev-parse --short HEAD) 67 | BRANCH=$(git rev-parse --abbrev-ref HEAD | tr '/' '__') 68 | REF_TAG=${{ steps.get_reference.outputs.ref }} 69 | 70 | echo "SHA: $SHA" 71 | echo "Branch: $BRANCH" 72 | echo "Ref tag: $REF_TAG" 73 | 74 | if [[ $REF_TAG =~ refs\/(heads|tags)\/ ]]; then 75 | REF_TAG=$(echo $REF_TAG | cut -d "/" -f 3) 76 | echo "Shortened ref tag is $REF_TAG" 77 | fi 78 | 79 | REF_TAG=$(echo -n "$REF_TAG" | tr -c '[:alnum:]._' '-') 80 | echo "version=$REF_TAG" >> "$GITHUB_OUTPUT" 81 | echo "Using version tag $REF_TAG" 82 | 83 | # Finally check to see if we're building on main; if we are, add a tag for latest. 
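          # For example, a build on main of the server image ends up tagged grafana/intro-to-mltp:mythical-beasts-server-latest (REGISTRY_LOCATION plus the matrix tag_suffix and "-latest").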
84 | if [ "$BRANCH" == "main" ]; then 85 | echo "latest_tag=${{env.REGISTRY_LOCATION}}:${{matrix.file_tag.tag_suffix}}-latest" >> "$GITHUB_OUTPUT" 86 | fi 87 | 88 | - name: Set up QEMU 89 | if: ${{ matrix.file_tag.setup-qemu }} 90 | uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 91 | with: 92 | image: tonistiigi/binfmt:master 93 | 94 | - name: Set up Docker Buildx 95 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 96 | with: 97 | config-inline: | 98 | [worker.oci] 99 | max-parallelism = 2 100 | 101 | - name: Retrieve credentials from Vault 102 | uses: grafana/shared-workflows/actions/get-vault-secrets@get-vault-secrets/v1.2.1 103 | with: 104 | common_secrets: | 105 | DOCKERHUB_USERNAME=dockerhub:username 106 | DOCKERHUB_PASSWORD=dockerhub:password 107 | 108 | - name: Login to Docker Hub 109 | uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 110 | with: 111 | username: ${{ env.DOCKERHUB_USERNAME }} 112 | password: ${{ env.DOCKERHUB_PASSWORD }} 113 | 114 | - name: Matrix Build and push Mythical images 115 | uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 116 | with: 117 | context: ${{ matrix.file_tag.context }} 118 | file: ${{ matrix.file_tag.file }} 119 | build-args: | 120 | SERVICE=${{ matrix.file_tag.service }} 121 | platforms: linux/amd64,linux/arm64 122 | outputs: type=registry,push=${{inputs.push_images}} 123 | tags: | 124 | ${{ env.REGISTRY_LOCATION }}:${{ matrix.file_tag.tag_suffix }}-${{ steps.image_tag.outputs.version }} 125 | ${{ steps.image_tag.outputs.latest_tag }} 126 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/node_modules/ 2 | agent/config-cloud-configured.yaml 3 | docker-compose-cloud-configured.yml 4 | unconfigure.sh 5 | .idea/ 6 | -------------------------------------------------------------------------------- /alloy/endpoints-cloud.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "url": "", 4 | "basicAuth": { 5 | "username": "", 6 | "password": "" 7 | } 8 | }, 9 | "logs": { 10 | "url": "", 11 | "basicAuth": { 12 | "username": "", 13 | "password": "" 14 | } 15 | }, 16 | "traces": { 17 | "url": "", 18 | "basicAuthToken": "", 19 | "tls": { 20 | "insecure": false, 21 | "insecureSkipVerify": true 22 | } 23 | }, 24 | "profiles": { 25 | "url": "", 26 | "basicAuth": { 27 | "username": "", 28 | "password": "" 29 | } 30 | }, 31 | "add_spanmetric_suffixes" : true, 32 | "spanmetrics_namespace": "traces.span.metrics" 33 | } 34 | -------------------------------------------------------------------------------- /alloy/endpoints.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "url": "http://mimir:9009/api/v1/push", 4 | "basicAuth": { 5 | "username": "", 6 | "password": "" 7 | } 8 | }, 9 | "logs": { 10 | "url": "http://loki:3100/loki/api/v1/push", 11 | "basicAuth": { 12 | "username": "", 13 | "password": "" 14 | } 15 | }, 16 | "traces": { 17 | "url": "http://tempo:4317", 18 | "basicAuthToken": "", 19 | "tls": { 20 | "insecure": true, 21 | "insecureSkipVerify": true 22 | } 23 | }, 24 | "profiles": { 25 | "url": "http://pyroscope:4040", 26 | "basicAuth": { 27 | "username": "", 28 | "password": "" 29 | } 30 | }, 31 | "add_spanmetric_suffixes" : false, 32 | "spanmetrics_namespace": "traces.spanmetrics" 33 | } 34 | 
-------------------------------------------------------------------------------- /beyla/config.yaml: -------------------------------------------------------------------------------- 1 | routes: 2 | patterns: 3 | - /owlbear 4 | - /unicorn 5 | - /manticore 6 | - /illithid 7 | - /beholder 8 | unmatched: heuristic 9 | -------------------------------------------------------------------------------- /cloud/envvars-grafana-cloud-unconfigured.sh: -------------------------------------------------------------------------------- 1 | # Grafana Cloud Configuration 2 | # API Access to A Grafana Stack, Metrics and Logs 3 | # 4 | # Grafana Stack (Managed API Key) 5 | export GRAFANA_STACK_URL="https://__SLUG__.grafana.net" 6 | export GRAFANA_STACK_API_KEY="" 7 | # 8 | # Grafana Metrics Endpoint 9 | # See: https://grafana.com/docs/grafana-cloud/metrics-prometheus/ 10 | export GRAFANA_METRICS_KEY_NAME="Metrics-Admin-DD-MM-YYYY" 11 | export GRAFANA_METRICS_HOST="prometheus-prod-10-prod-us-central-0.grafana.net" 12 | export GRAFANA_METRICS_USERNAME="__ID__" 13 | export GRAFANA_METRICS_API_KEY="__API_KEY__" 14 | export GRAFANA_METRICS_QUERY_URL="https://$GRAFANA_METRICS_HOST/api/prom/api/v1" 15 | export GRAFANA_METRICS_WRITE_URL="https://$GRAFANA_METRICS_HOST/api/prom/push" 16 | # 17 | # Grafana Logs Endpoint 18 | # See: https://grafana.com/docs/loki/latest/api/ 19 | export GRAFANA_LOGS_KEY_NAME="Logs-Admin-DD-MM-YYYY" 20 | export GRAFANA_LOGS_HOST="logs-prod3.grafana.net" 21 | export GRAFANA_LOGS_USERNAME="__ID__" 22 | export GRAFANA_LOGS_API_KEY="__API_KEY__" 23 | export GRAFANA_LOGS_QUERY_URL="https://$GRAFANA_LOGS_HOST/loki/api/v1" 24 | export GRAFANA_LOGS_WRITE_URL="https://$GRAFANA_LOGS_HOST/loki/api/v1/push" 25 | # 26 | # Grafana Traces Endpoint 27 | export GRAFANA_TRACES_KEY_NAME="Traces-Admin-DD-MM-YYYY" 28 | export GRAFANA_TRACES_USERNAME="__ID__" 29 | export GRAFANA_TRACES_API_KEY="__API_KEY__" 30 | export GRAFANA_TRACES_HOST="tempo-us-central1.grafana.net:443" 31 | # 32 | # 33 | # End -------------------------------------------------------------------------------- /docker-compose-cloud.yml: -------------------------------------------------------------------------------- 1 | name: grafana-intro-to-mltp-cloud 2 | volumes: 3 | grafana: 4 | postgres: 5 | services: 6 | # Grafana Alloy batches and processes traces sent to it, generating 7 | # auto-logs from those traces. 8 | # Includes Metrics, Logs, Traces and Profiles. 9 | alloy: 10 | image: grafana/alloy:v1.9.1 11 | ports: 12 | - "12347:12345" 13 | - "12348:12348" 14 | - "6832:6832" 15 | - "55679:55679" 16 | - "4317:4317" 17 | - "4318:4318" 18 | volumes: 19 | - "./alloy/config.alloy:/etc/alloy/config.alloy" 20 | - "./alloy/endpoints-cloud.json:/etc/alloy/endpoints.json" 21 | command: [ 22 | "run", 23 | "--server.http.listen-addr=0.0.0.0:12345", 24 | "--stability.level=public-preview", 25 | "/etc/alloy/config.alloy", 26 | ] 27 | 28 | # A RabbitMQ queue used to send message between the requester and the server microservices. 29 | mythical-queue: 30 | image: rabbitmq:management 31 | restart: always 32 | ports: 33 | - "5672:5672" 34 | - "15672:15672" 35 | healthcheck: 36 | test: rabbitmq-diagnostics check_running 37 | interval: 5s 38 | timeout: 30s 39 | retries: 10 40 | 41 | # A postgres DB used to store data by the API server microservice. 
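  # For ad-hoc inspection of the stored data you can connect from the host once the stack is up.
  # A minimal sketch, assuming the postgres image's default `postgres` superuser (the password is
  # the one set via POSTGRES_PASSWORD below):
  #   psql -h localhost -p 5432 -U postgres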
42 | mythical-database: 43 | image: postgres:14.5 44 | restart: always 45 | environment: 46 | POSTGRES_PASSWORD: "mythical" 47 | volumes: 48 | - "postgres:/var/lib/postgresql/data" 49 | ports: 50 | - "5432:5432" 51 | 52 | # A microservice that makes requests to the API server microservice. Requests are also pushed onto the mythical-queue. 53 | mythical-requester: 54 | #build: 55 | # context: ./source 56 | # dockerfile: docker/Dockerfile 57 | # args: 58 | # SERVICE: mythical-beasts-requester 59 | image: grafana/intro-to-mltp:mythical-beasts-requester-latest 60 | restart: always 61 | depends_on: 62 | mythical-queue: 63 | condition: service_healthy 64 | mythical-server: 65 | condition: service_started 66 | ports: 67 | - "4001:4001" 68 | environment: 69 | - NAMESPACE=production 70 | - LOGS_TARGET=http://alloy:3100/loki/api/v1/push 71 | - TRACING_COLLECTOR_HOST=alloy 72 | - TRACING_COLLECTOR_PORT=4317 73 | - PROFILE_COLLECTOR_HOST=alloy 74 | - PROFILE_COLLECTOR_PORT=4040 75 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 76 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.4 77 | 78 | # The API server microservice. 79 | # It writes logs directly to the Loki service, exposes metrics for the Prometheus 80 | # service and sends traces to the Grafana Alloy instance. 81 | mythical-server: 82 | #build: 83 | # context: ./source 84 | # dockerfile: docker/Dockerfile 85 | # args: 86 | # SERVICE: mythical-beasts-server 87 | image: grafana/intro-to-mltp:mythical-beasts-server-latest 88 | restart: always 89 | ports: 90 | - "4000:4000" 91 | - "80:80" 92 | depends_on: 93 | - mythical-database 94 | environment: 95 | - NAMESPACE=production 96 | - LOGS_TARGET=http://alloy:3100/loki/api/v1/push 97 | - TRACING_COLLECTOR_HOST=alloy 98 | - TRACING_COLLECTOR_PORT=4317 99 | - PROFILE_COLLECTOR_HOST=alloy 100 | - PROFILE_COLLECTOR_PORT=4040 101 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 102 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.5 103 | 104 | # A microservice that consumes requests from the mythical-queue 105 | mythical-recorder: 106 | #build: 107 | # context: ./source 108 | # dockerfile: docker/Dockerfile 109 | # args: 110 | # SERVICE: mythical-beasts-recorder 111 | image: grafana/intro-to-mltp:mythical-beasts-recorder-latest 112 | restart: always 113 | depends_on: 114 | mythical-queue: 115 | condition: service_healthy 116 | ports: 117 | - "4002:4002" 118 | environment: 119 | - NAMESPACE=production 120 | - LOGS_TARGET=http://alloy:3100/loki/api/v1/push 121 | - TRACING_COLLECTOR_HOST=alloy 122 | - TRACING_COLLECTOR_PORT=4317 123 | - PROFILE_COLLECTOR_HOST=alloy 124 | - PROFILE_COLLECTOR_PORT=4040 125 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 126 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.5 127 | -------------------------------------------------------------------------------- /docker-compose-otel.yml: -------------------------------------------------------------------------------- 1 | name: grafana-intro-to-mltp-otel 2 | volumes: 3 | grafana: 4 | postgres: 5 | services: 6 | # The opentelemetry-collector batches and processes traces sent to it. 7 | opentelemetry-collector: 8 | image: otel/opentelemetry-collector-contrib 9 | ports: 10 | - "12347:12345" 11 | - "12348:12348" 12 | - "6832:6832" 13 | - "55679:55679" 14 | - "4317:4317" 15 | - "4318:4318" 16 | volumes: 17 | - ./otel/otel.yml:/etc/otel-collector-config.yml 18 | command: ["--config=/etc/otel-collector-config.yml"] 19 | 20 | # The Grafana dashboarding server. 
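  # Grafana is served on http://localhost:3000 via the port mapping below; anonymous access is
  # enabled with the Admin role and the login form is disabled, so the provisioned dashboards
  # and datasources are available without signing in.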
21 | grafana: 22 | image: grafana/grafana:12.0.2 23 | volumes: 24 | - "./grafana/definitions:/var/lib/grafana/dashboards" 25 | - "./grafana/provisioning:/etc/grafana/provisioning" 26 | ports: 27 | - "3000:3000" 28 | environment: 29 | - GF_FEATURE_TOGGLES_ENABLE=flameGraph traceqlSearch correlations traceQLStreaming metricsSummary traceqlEditor traceToMetrics traceToProfiles datatrails 30 | - GF_INSTALL_PLUGINS=grafana-lokiexplore-app,grafana-exploretraces-app,grafana-pyroscope-app 31 | - GF_AUTH_ANONYMOUS_ENABLED=true 32 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 33 | - GF_AUTH_DISABLE_LOGIN_FORM=true 34 | 35 | # A RabbitMQ queue used to send message between the requester and the server microservices. 36 | mythical-queue: 37 | image: rabbitmq:management 38 | restart: always 39 | ports: 40 | - "5672:5672" 41 | - "15672:15672" 42 | healthcheck: 43 | test: rabbitmq-diagnostics check_running 44 | interval: 5s 45 | timeout: 30s 46 | retries: 10 47 | 48 | # A postgres DB used to store data by the API server microservice. 49 | mythical-database: 50 | image: postgres:14.5 51 | restart: always 52 | environment: 53 | POSTGRES_PASSWORD: "mythical" 54 | volumes: 55 | - "postgres:/var/lib/postgresql/data" 56 | ports: 57 | - "5432:5432" 58 | 59 | # A microservice that makes requests to the API server microservice. Requests are also pushed onto the mythical-queue. 60 | mythical-requester: 61 | #build: 62 | # context: ./source 63 | # dockerfile: docker/Dockerfile 64 | # args: 65 | # SERVICE: mythical-beasts-requester 66 | image: grafana/intro-to-mltp:mythical-beasts-requester-latest 67 | restart: always 68 | depends_on: 69 | mythical-queue: 70 | condition: service_healthy 71 | mythical-server: 72 | condition: service_started 73 | ports: 74 | - "4001:4001" 75 | environment: 76 | - NAMESPACE=production 77 | - LOGS_TARGET=http://loki:3100/loki/api/v1/push 78 | - TRACING_COLLECTOR_HOST=opentelemetry-collector 79 | - TRACING_COLLECTOR_PORT=4317 80 | - PROFILE_COLLECTOR_HOST=pyroscope 81 | - PROFILE_COLLECTOR_PORT=4040 82 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 83 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.4 84 | 85 | # The API server microservice. 86 | # It writes logs directly to the Loki service, exposes metrics for the Prometheus 87 | # service and sends traces to the Grafana opentelemetry-collector instance. 
88 | mythical-server: 89 | #build: 90 | # context: ./source 91 | # dockerfile: docker/Dockerfile 92 | # args: 93 | # SERVICE: mythical-beasts-server 94 | image: grafana/intro-to-mltp:mythical-beasts-server-latest 95 | restart: always 96 | ports: 97 | - "4000:4000" 98 | - "80:80" 99 | depends_on: 100 | - mythical-database 101 | environment: 102 | - NAMESPACE=production 103 | - LOGS_TARGET=http://loki:3100/loki/api/v1/push 104 | - TRACING_COLLECTOR_HOST=opentelemetry-collector 105 | - TRACING_COLLECTOR_PORT=4317 106 | - PROFILE_COLLECTOR_HOST=pyroscope 107 | - PROFILE_COLLECTOR_PORT=4040 108 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 109 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.5 110 | 111 | # A microservice that consumes requests from the mythical-queue 112 | mythical-recorder: 113 | #build: 114 | # context: ./source 115 | # dockerfile: docker/Dockerfile 116 | # args: 117 | # SERVICE: mythical-beasts-recorder 118 | image: grafana/intro-to-mltp:mythical-beasts-recorder-latest 119 | restart: always 120 | depends_on: 121 | mythical-queue: 122 | condition: service_healthy 123 | ports: 124 | - "4002:4002" 125 | environment: 126 | - NAMESPACE=production 127 | - LOGS_TARGET=http://loki:3100/loki/api/v1/push 128 | - TRACING_COLLECTOR_HOST=opentelemetry-collector 129 | - TRACING_COLLECTOR_PORT=4317 130 | - PROFILE_COLLECTOR_HOST=pyroscope 131 | - PROFILE_COLLECTOR_PORT=4040 132 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 133 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.5 134 | 135 | # The Tempo service stores traces send to it by Grafana opentelemetry-collector, and takes 136 | # queries from Grafana to visualise those traces. 137 | tempo: 138 | image: grafana/tempo:2.8.1 139 | ports: 140 | - "3200:3200" 141 | - "55680:55680" 142 | - "55681:55681" 143 | - "14250:14250" 144 | command: [ "-config.file=/etc/tempo.yaml" ] 145 | volumes: 146 | - "./tempo/tempo.yaml:/etc/tempo.yaml" 147 | 148 | # The Loki service stores logs sent to it, and takes queries from Grafana 149 | # to visualise those logs. 150 | loki: 151 | image: grafana/loki:3.5.1 152 | command: ["--pattern-ingester.enabled=true", "-config.file=/etc/loki/loki.yaml"] 153 | ports: 154 | - "3100:3100" 155 | volumes: 156 | - "./loki/loki.yaml:/etc/loki/loki.yaml" 157 | 158 | mimir: 159 | image: grafana/mimir:2.16.0 160 | command: ["-ingester.native-histograms-ingestion-enabled=true", "-config.file=/etc/mimir.yaml"] 161 | ports: 162 | - "9009:9009" 163 | volumes: 164 | - "./mimir/mimir.yaml:/etc/mimir.yaml" 165 | 166 | pyroscope: 167 | image: grafana/pyroscope:1.13.5 168 | ports: 169 | - "4040:4040" 170 | command: ["server"] 171 | 172 | k6: 173 | image: grafana/k6:0.58.0 174 | volumes: 175 | - "./k6:/scripts" 176 | environment: 177 | - K6_PROMETHEUS_RW_SERVER_URL=http://mimir:9009/api/v1/push 178 | - K6_DURATION=3600s 179 | - K6_VUS=4 180 | - K6_PROMETHEUS_RW_TREND_AS_NATIVE_HISTOGRAM=true 181 | restart: always 182 | command: ["run", "-o", "experimental-prometheus-rw", "/scripts/mythical-loadtest.js"] 183 | 184 | beyla-requester: 185 | image: grafana/beyla:2.1.0 186 | # Beyla requires to be run in the same process namespace as the process it's watching. 187 | # In Docker, we can do this by joining the namespace for the watched process with the Beyla 188 | # container watching it by using a specific `pid` label. 189 | pid: "service:mythical-requester" 190 | # Beyla requires the several system capabilities to run, to add hooks to the underlying kernel. 
191 | # Note that you should *always* be aware of the security implications of adding capabilities 192 | # before you do so. 193 | cap_add: 194 | - SYS_ADMIN 195 | - SYS_RESOURCE 196 | - NET_RAW 197 | - DAC_READ_SEARCH 198 | - SYS_PTRACE 199 | - PERFMON 200 | - BPF 201 | - CHECKPOINT_RESTORE 202 | # If using the above capability fails to instrument your service, remove it and uncomment the 203 | # line below. Beware that this will allow Beyla to run with full privileges, which may be 204 | # undesirable. 205 | #privileged: true 206 | command: 207 | - /beyla 208 | - --config=/configs/config.yaml 209 | volumes: 210 | - ./beyla/:/configs 211 | # See the full list of configuration options at 212 | # https://grafana.com/docs/grafana-cloud/monitor-applications/beyla/configure/options/ for more details on the 213 | # options set below. 214 | environment: 215 | BEYLA_OPEN_PORT: "4001" # Instrument any service listening on port 4001. 216 | BEYLA_SERVICE_NAMESPACE: "mythical" # The namespace for the service. 217 | BEYLA_PROMETHEUS_PORT: "9090" # The port to expose Prometheus metrics on. 218 | #BEYLA_BPF_TRACK_REQUEST_HEADERS: "true" 219 | OTEL_SERVICE_NAME: "beyla-mythical-requester" # The service name to use for OpenTelemetry traces. 220 | OTEL_EXPORTER_OTLP_TRACES_INSECURE: "true" # Whether to use an insecure connection to Grafana Agent. 221 | OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" # The protocol to use to send traces to Grafana Agent. 222 | # The endpoint to send traces to. 223 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://opentelemetry-collector:4317" 224 | # The `depends_on` block below ensures that the mythical-requester service is started before Beyla. 225 | depends_on: 226 | mythical-requester: 227 | condition: service_started 228 | 229 | beyla-server: 230 | image: grafana/beyla:2.1.0 231 | # Beyla requires to be run in the same process namespace as the process it's watching. 232 | # In Docker, we can do this by joining the namespace for the watched process with the Beyla 233 | # container watching it by using a specific `pid` label. 234 | pid: "service:mythical-server" 235 | # Beyla requires the several system capabilities to run, to add hooks to the underlying kernel. 236 | # Note that you should *always* be aware of the security implications of adding capabilities 237 | # before you do so. 238 | cap_add: 239 | - SYS_ADMIN 240 | - SYS_RESOURCE 241 | - NET_RAW 242 | - DAC_READ_SEARCH 243 | - SYS_PTRACE 244 | - PERFMON 245 | - BPF 246 | - CHECKPOINT_RESTORE 247 | # If using the above capability fails to instrument your service, remove it and uncomment the 248 | # line below. Beware that this will allow Beyla to run with full privileges, which may be 249 | # undesirable. 250 | #privileged: true 251 | command: 252 | - /beyla 253 | - --config=/configs/config.yaml 254 | volumes: 255 | - ./beyla/:/configs 256 | # See the full list of configuration options at 257 | # https://grafana.com/docs/grafana-cloud/monitor-applications/beyla/configure/options/ for more details on the 258 | # options set below. 259 | environment: 260 | BEYLA_OPEN_PORT: "4000" # Instrument any service listening on port 4000. 261 | BEYLA_SERVICE_NAMESPACE: "mythical" # The namespace for the service. 262 | BEYLA_PROMETHEUS_PORT: "9090" # The port to expose Prometheus metrics on. 263 | #BEYLA_BPF_TRACK_REQUEST_HEADERS: "true" 264 | OTEL_SERVICE_NAME: "beyla-mythical-server" # The service name to use for OpenTelemetry traces. 265 | OTEL_EXPORTER_OTLP_TRACES_INSECURE: "true" # Whether to use an insecure connection to Grafana Agent. 
266 | OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" # The protocol to use to send traces to Grafana Agent. 267 | # The endpoint to send traces to. 268 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://opentelemetry-collector:4317" 269 | # The `depends_on` block below ensures that the mythical-server service is started before Beyla. 270 | depends_on: 271 | mythical-server: 272 | condition: service_started 273 | 274 | beyla-recorder: 275 | image: grafana/beyla:2.1.0 276 | # Beyla requires to be run in the same process namespace as the process it's watching. 277 | # In Docker, we can do this by joining the namespace for the watched process with the Beyla 278 | # container watching it by using a specific `pid` label. 279 | pid: "service:mythical-recorder" 280 | # Beyla requires the several system capabilities to run, to add hooks to the underlying kernel. 281 | # Note that you should *always* be aware of the security implications of adding capabilities 282 | # before you do so. 283 | cap_add: 284 | - SYS_ADMIN 285 | - SYS_RESOURCE 286 | - NET_RAW 287 | - DAC_READ_SEARCH 288 | - SYS_PTRACE 289 | - PERFMON 290 | - BPF 291 | - CHECKPOINT_RESTORE 292 | # If using the above capability fails to instrument your service, remove it and uncomment the 293 | # line below. Beware that this will allow Beyla to run with full privileges, which may be 294 | # undesirable. 295 | #privileged: true 296 | command: 297 | - /beyla 298 | - --config=/configs/config.yaml 299 | volumes: 300 | - ./beyla/:/configs 301 | # See the full list of configuration options at 302 | # https://grafana.com/docs/grafana-cloud/monitor-applications/beyla/configure/options/ for more details on the 303 | # options set below. 304 | environment: 305 | BEYLA_OPEN_PORT: "4002" # Instrument any service listening on port 4002. 306 | BEYLA_SERVICE_NAMESPACE: "mythical" # The namespace for the service. 307 | BEYLA_PROMETHEUS_PORT: "9090" # The port to expose Prometheus metrics on. 308 | #BEYLA_BPF_TRACK_REQUEST_HEADERS: "true" 309 | OTEL_SERVICE_NAME: "beyla-mythical-recorder" # The service name to use for OpenTelemetry traces. 310 | OTEL_EXPORTER_OTLP_TRACES_INSECURE: "true" # Whether to use an insecure connection to Grafana Agent. 311 | OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" # The protocol to use to send traces to Grafana Agent. 312 | # The endpoint to send traces to. 313 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://opentelemetry-collector:4317" 314 | # The `depends_on` block below ensures that the mythical-recorder service is started before Beyla. 315 | depends_on: 316 | mythical-recorder: 317 | condition: service_started 318 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | name: grafana-intro-to-mltp 2 | volumes: 3 | grafana: 4 | postgres: 5 | services: 6 | # Grafana Alloy batches and processes traces sent to it, generating 7 | # auto-logs from those traces. 8 | # Includes Metrics, Logs, Traces and Profiles. 
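  # The mythical services below send OTLP traces to Alloy on 4317 (gRPC) / 4318 (HTTP) and push
  # logs to its Loki-compatible endpoint on port 3100 (see their LOGS_TARGET setting). Alloy's own
  # HTTP UI listens on 12345 inside the container, exposed as http://localhost:12347 by the port
  # mapping below.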
9 | alloy: 10 | image: grafana/alloy:v1.9.1 11 | ports: 12 | - "12350:12350" 13 | - "12347:12345" 14 | - "12348:12348" 15 | - "6832:6832" 16 | - "55679:55679" 17 | - "4317:4317" 18 | - "4318:4318" 19 | volumes: 20 | - "./alloy/config.alloy:/etc/alloy/config.alloy" 21 | - "./alloy/endpoints.json:/etc/alloy/endpoints.json" 22 | command: [ 23 | "run", 24 | "--server.http.listen-addr=0.0.0.0:12345", 25 | "--stability.level=public-preview", 26 | "/etc/alloy/config.alloy", 27 | ] 28 | 29 | # The Grafana dashboarding server. 30 | grafana: 31 | image: grafana/grafana:12.0.2 32 | volumes: 33 | - "./grafana/definitions:/var/lib/grafana/dashboards" 34 | - "./grafana/provisioning:/etc/grafana/provisioning" 35 | ports: 36 | - "3000:3000" 37 | environment: 38 | - GF_FEATURE_TOGGLES_ENABLE=flameGraph traceqlSearch traceQLStreaming correlations metricsSummary traceqlEditor traceToMetrics traceToProfiles datatrails 39 | - GF_INSTALL_PLUGINS=grafana-lokiexplore-app,grafana-exploretraces-app,grafana-pyroscope-app 40 | - GF_AUTH_ANONYMOUS_ENABLED=true 41 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 42 | - GF_AUTH_DISABLE_LOGIN_FORM=true 43 | 44 | # A RabbitMQ queue used to send message between the requester and the server microservices. 45 | mythical-queue: 46 | image: rabbitmq:management 47 | restart: always 48 | ports: 49 | - "5672:5672" 50 | - "15672:15672" 51 | healthcheck: 52 | test: rabbitmq-diagnostics check_running 53 | interval: 5s 54 | timeout: 30s 55 | retries: 10 56 | 57 | # A postgres DB used to store data by the API server microservice. 58 | mythical-database: 59 | image: postgres:14.5 60 | restart: always 61 | environment: 62 | POSTGRES_PASSWORD: "mythical" 63 | volumes: 64 | - "postgres:/var/lib/postgresql/data" 65 | ports: 66 | - "5432:5432" 67 | 68 | # A microservice that makes requests to the API server microservice. Requests are also pushed onto the mythical-queue. 69 | mythical-requester: 70 | #build: 71 | # context: ./source 72 | # dockerfile: docker/Dockerfile 73 | # args: 74 | # SERVICE: mythical-beasts-requester 75 | image: grafana/intro-to-mltp:mythical-beasts-requester-latest 76 | restart: always 77 | depends_on: 78 | mythical-queue: 79 | condition: service_healthy 80 | mythical-server: 81 | condition: service_started 82 | ports: 83 | - "4001:4001" 84 | environment: 85 | - NAMESPACE=production 86 | - LOGS_TARGET=http://alloy:3100/loki/api/v1/push 87 | - TRACING_COLLECTOR_HOST=alloy 88 | - TRACING_COLLECTOR_PORT=4317 89 | - PROFILE_COLLECTOR_HOST=alloy 90 | - PROFILE_COLLECTOR_PORT=4040 91 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 92 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.4 93 | # Uncomment this line to enable timeshift example in the mythical-requester service, which will use timestamps 94 | # in the log lines themselves to rewrite the default timestamp to the time specified in the logline. 95 | #- TIMESHIFT=true 96 | 97 | # The API server microservice. 98 | # It writes logs directly to the Loki service, exposes metrics for the Prometheus 99 | # service and sends traces to the Grafana Alloy instance. 
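  # To poke the API by hand once the stack is up, try one of the beast endpoints listed in
  # beyla/config.yaml on the mapped port; a sketch, assuming those endpoints accept plain GETs:
  #   curl http://localhost:4000/unicorn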
100 | mythical-server: 101 | #build: 102 | # context: ./source 103 | # dockerfile: docker/Dockerfile 104 | # args: 105 | # SERVICE: mythical-beasts-server 106 | image: grafana/intro-to-mltp:mythical-beasts-server-latest 107 | restart: always 108 | ports: 109 | - "4000:4000" 110 | - "80:80" 111 | depends_on: 112 | - mythical-database 113 | environment: 114 | - NAMESPACE=production 115 | - LOGS_TARGET=http://alloy:3100/loki/api/v1/push 116 | - TRACING_COLLECTOR_HOST=alloy 117 | - TRACING_COLLECTOR_PORT=4317 118 | - PROFILE_COLLECTOR_HOST=alloy 119 | - PROFILE_COLLECTOR_PORT=4040 120 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 121 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.5 122 | 123 | # A microservice that consumes requests from the mythical-queue 124 | mythical-recorder: 125 | #build: 126 | # context: ./source 127 | # dockerfile: docker/Dockerfile 128 | # args: 129 | # SERVICE: mythical-beasts-recorder 130 | image: grafana/intro-to-mltp:mythical-beasts-recorder-latest 131 | restart: always 132 | depends_on: 133 | mythical-queue: 134 | condition: service_healthy 135 | ports: 136 | - "4002:4002" 137 | environment: 138 | - NAMESPACE=production 139 | - LOGS_TARGET=http://alloy:3100/loki/api/v1/push 140 | - TRACING_COLLECTOR_HOST=alloy 141 | - TRACING_COLLECTOR_PORT=4317 142 | - PROFILE_COLLECTOR_HOST=alloy 143 | - PROFILE_COLLECTOR_PORT=4040 144 | - OTEL_EXPORTER_OTLP_TRACES_INSECURE=true 145 | - OTEL_RESOURCE_ATTRIBUTES=ip=1.2.3.5 146 | 147 | # React frontend for the mythical beasts management system 148 | mythical-frontend: 149 | #build: 150 | # context: ./source/mythical-beasts-frontend 151 | # dockerfile: Dockerfile 152 | # args: 153 | # - REACT_APP_API_URL=/api 154 | # - REACT_APP_ALLOY_ENDPOINT=http://localhost:12350/collect 155 | image: grafana/intro-to-mltp:mythical-beasts-frontend-latest 156 | restart: always 157 | depends_on: 158 | mythical-server: 159 | condition: service_started 160 | alloy: 161 | condition: service_started 162 | ports: 163 | - "3001:80" 164 | 165 | # The Tempo service stores traces send to it by Grafana Alloy, and takes 166 | # queries from Grafana to visualise those traces. 167 | tempo: 168 | image: grafana/tempo:2.8.1 169 | ports: 170 | - "3200:3200" 171 | - "9411:9411" 172 | - "55680:55680" 173 | - "55681:55681" 174 | - "14250:14250" 175 | command: [ "-config.file=/etc/tempo.yaml" ] 176 | volumes: 177 | - "./tempo/tempo.yaml:/etc/tempo.yaml" 178 | 179 | # The Loki service stores logs sent to it, and takes queries from Grafana 180 | # to visualise those logs. 
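  # Besides Grafana, logs can be queried straight from Loki's HTTP API on the mapped port, e.g.
  # via the standard query_range endpoint (adjust the label selector to the labels the services
  # actually push):
  #   curl -G http://localhost:3100/loki/api/v1/query_range --data-urlencode 'query={job=~".+"}'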
181 | loki: 182 | image: grafana/loki:3.5.1 183 | command: ["--pattern-ingester.enabled=true", "-config.file=/etc/loki/loki.yaml"] 184 | ports: 185 | - "3100:3100" 186 | volumes: 187 | - "./loki/loki.yaml:/etc/loki/loki.yaml" 188 | 189 | mimir: 190 | image: grafana/mimir:2.16.0 191 | command: ["-ingester.native-histograms-ingestion-enabled=true", "-config.file=/etc/mimir.yaml"] 192 | ports: 193 | - "9009:9009" 194 | volumes: 195 | - "./mimir/mimir.yaml:/etc/mimir.yaml" 196 | 197 | k6: 198 | image: grafana/k6:0.58.0 199 | volumes: 200 | - "./k6:/scripts" 201 | environment: 202 | - K6_PROMETHEUS_RW_SERVER_URL=http://mimir:9009/api/v1/push 203 | - K6_DURATION=3600s 204 | - K6_VUS=4 205 | - K6_PROMETHEUS_RW_TREND_AS_NATIVE_HISTOGRAM=true 206 | restart: always 207 | command: ["run", "-o", "experimental-prometheus-rw", "/scripts/mythical-loadtest.js"] 208 | 209 | pyroscope: 210 | image: grafana/pyroscope:1.13.5 211 | ports: 212 | - "4040:4040" 213 | command: ["server"] 214 | 215 | beyla-requester: 216 | image: grafana/beyla:2.1.0 217 | # Beyla requires to be run in the same process namespace as the process it's watching. 218 | # In Docker, we can do this by joining the namespace for the watched process with the Beyla 219 | # container watching it by using a specific `pid` label. 220 | pid: "service:mythical-requester" 221 | # Beyla requires the several system capabilities to run, to add hooks to the underlying kernel. 222 | # Note that you should *always* be aware of the security implications of adding capabilities 223 | # before you do so. 224 | cap_add: 225 | - SYS_ADMIN 226 | - SYS_RESOURCE 227 | - NET_RAW 228 | - DAC_READ_SEARCH 229 | - SYS_PTRACE 230 | - PERFMON 231 | - BPF 232 | - CHECKPOINT_RESTORE 233 | # If using the above capability fails to instrument your service, remove it and uncomment the 234 | # line below. Beware that this will allow Beyla to run with full privileges, which may be 235 | # undesirable. 236 | #privileged: true 237 | command: 238 | - /beyla 239 | - --config=/configs/config.yaml 240 | volumes: 241 | - ./beyla/:/configs 242 | # See the full list of configuration options at 243 | # https://grafana.com/docs/grafana-cloud/monitor-applications/beyla/configure/options/ for more details on the 244 | # options set below. 245 | environment: 246 | BEYLA_OPEN_PORT: "4001" # Instrument any service listening on port 4001. 247 | BEYLA_SERVICE_NAMESPACE: "mythical" # The namespace for the service. 248 | BEYLA_PROMETHEUS_PORT: "9090" # The port to expose Prometheus metrics on. 249 | #BEYLA_BPF_TRACK_REQUEST_HEADERS: "true" 250 | OTEL_SERVICE_NAME: "beyla-mythical-requester" # The service name to use for OpenTelemetry traces. 251 | OTEL_EXPORTER_OTLP_TRACES_INSECURE: "true" # Whether to use an insecure connection to Grafana Alloy. 252 | OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" # The protocol to use to send traces to Grafana Alloy. 253 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://alloy:4317" # The endpoint to send traces to. 254 | # The `depends_on` block below ensures that the mythical-requester service is started before Beyla. 255 | depends_on: 256 | mythical-requester: 257 | condition: service_started 258 | 259 | beyla-server: 260 | image: grafana/beyla:2.1.0 261 | # Beyla requires to be run in the same process namespace as the process it's watching. 262 | # In Docker, we can do this by joining the namespace for the watched process with the Beyla 263 | # container watching it by using a specific `pid` label. 
264 | pid: "service:mythical-server" 265 | # Beyla requires several system capabilities to run, in order to add hooks to the underlying kernel. 266 | # Note that you should *always* be aware of the security implications of adding capabilities 267 | # before you do so. 268 | cap_add: 269 | - SYS_ADMIN 270 | - SYS_RESOURCE 271 | - NET_RAW 272 | - DAC_READ_SEARCH 273 | - SYS_PTRACE 274 | - PERFMON 275 | - BPF 276 | - CHECKPOINT_RESTORE 277 | # If using the above capabilities fails to instrument your service, remove them and uncomment the 278 | # line below. Beware that this will allow Beyla to run with full privileges, which may be 279 | # undesirable. 280 | #privileged: true 281 | command: 282 | - /beyla 283 | - --config=/configs/config.yaml 284 | volumes: 285 | - ./beyla/:/configs 286 | # See the full list of configuration options at 287 | # https://grafana.com/docs/grafana-cloud/monitor-applications/beyla/configure/options/ for more details on the 288 | # options set below. 289 | environment: 290 | BEYLA_OPEN_PORT: "4000" # Instrument any service listening on port 4000. 291 | BEYLA_SERVICE_NAMESPACE: "mythical" # The namespace for the service. 292 | BEYLA_PROMETHEUS_PORT: "9090" # The port to expose Prometheus metrics on. 293 | #BEYLA_BPF_TRACK_REQUEST_HEADERS: "true" 294 | OTEL_SERVICE_NAME: "beyla-mythical-server" # The service name to use for OpenTelemetry traces. 295 | OTEL_EXPORTER_OTLP_TRACES_INSECURE: "true" # Whether to use an insecure connection to Grafana Alloy. 296 | OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" # The protocol to use to send traces to Grafana Alloy. 297 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://alloy:4317" # The endpoint to send traces to. 298 | # The `depends_on` block below ensures that the mythical-server service is started before Beyla. 299 | depends_on: 300 | mythical-server: 301 | condition: service_started 302 | 303 | beyla-recorder: 304 | image: grafana/beyla:2.1.0 305 | # Beyla needs to run in the same process namespace as the process it's watching. 306 | # In Docker, we can do this by joining the namespace for the watched process with the Beyla 307 | # container watching it by using a specific `pid` label. 308 | pid: "service:mythical-recorder" 309 | # Beyla requires several system capabilities to run, in order to add hooks to the underlying kernel. 310 | # Note that you should *always* be aware of the security implications of adding capabilities 311 | # before you do so. 312 | cap_add: 313 | - SYS_ADMIN 314 | - SYS_RESOURCE 315 | - NET_RAW 316 | - DAC_READ_SEARCH 317 | - SYS_PTRACE 318 | - PERFMON 319 | - BPF 320 | - CHECKPOINT_RESTORE 321 | # If using the above capabilities fails to instrument your service, remove them and uncomment the 322 | # line below. Beware that this will allow Beyla to run with full privileges, which may be 323 | # undesirable. 324 | #privileged: true 325 | command: 326 | - /beyla 327 | - --config=/configs/config.yaml 328 | volumes: 329 | - ./beyla/:/configs 330 | # See the full list of configuration options at 331 | # https://grafana.com/docs/grafana-cloud/monitor-applications/beyla/configure/options/ for more details on the 332 | # options set below. 333 | environment: 334 | BEYLA_OPEN_PORT: "4002" # Instrument any service listening on port 4002. 335 | BEYLA_SERVICE_NAMESPACE: "mythical" # The namespace for the service. 336 | BEYLA_PROMETHEUS_PORT: "9090" # The port to expose Prometheus metrics on. 
337 | #BEYLA_BPF_TRACK_REQUEST_HEADERS: "true" 338 | OTEL_SERVICE_NAME: "beyla-mythical-recorder" # The service name to use for OpenTelemetry traces. 339 | OTEL_EXPORTER_OTLP_TRACES_INSECURE: "true" # Whether to use an insecure connection to Grafana Alloy. 340 | OTEL_EXPORTER_OTLP_PROTOCOL: "grpc" # The protocol to use to send traces to Grafana Alloy. 341 | OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "http://alloy:4317" # The endpoint to send traces to. 342 | # The `depends_on` block below ensures that the mythical-recorder service is started before Beyla. 343 | depends_on: 344 | mythical-recorder: 345 | condition: service_started 346 | -------------------------------------------------------------------------------- /grafana/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/grafana/intro-to-mltp/14f8df4745b93a932cd2c4bf37e9ff1e8d3ad909/grafana/.DS_Store -------------------------------------------------------------------------------- /grafana/definitions/mlt-erroring-endpoints.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "grafana", 8 | "uid": "-- Grafana --" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "target": { 15 | "limit": 100, 16 | "matchAny": false, 17 | "tags": [], 18 | "type": "dashboard" 19 | }, 20 | "type": "dashboard" 21 | } 22 | ] 23 | }, 24 | "editable": true, 25 | "fiscalYearStartMonth": 0, 26 | "graphTooltip": 0, 27 | "id": 3, 28 | "links": [ 29 | { 30 | "asDropdown": true, 31 | "icon": "external link", 32 | "includeVars": false, 33 | "keepTime": false, 34 | "tags": [ 35 | "intro-to-mlt" 36 | ], 37 | "targetBlank": false, 38 | "title": "Intro to MLT Dashboards", 39 | "tooltip": "", 40 | "type": "dashboards", 41 | "url": "" 42 | } 43 | ], 44 | "liveNow": false, 45 | "panels": [ 46 | { 47 | "datasource": { 48 | "type": "prometheus", 49 | "uid": "mimir" 50 | }, 51 | "fieldConfig": { 52 | "defaults": { 53 | "color": { 54 | "mode": "palette-classic" 55 | }, 56 | "custom": { 57 | "axisCenteredZero": false, 58 | "axisColorMode": "text", 59 | "axisLabel": "", 60 | "axisPlacement": "auto", 61 | "barAlignment": 0, 62 | "drawStyle": "line", 63 | "fillOpacity": 0, 64 | "gradientMode": "none", 65 | "hideFrom": { 66 | "legend": false, 67 | "tooltip": false, 68 | "viz": false 69 | }, 70 | "lineInterpolation": "linear", 71 | "lineWidth": 1, 72 | "pointSize": 5, 73 | "scaleDistribution": { 74 | "type": "linear" 75 | }, 76 | "showPoints": "auto", 77 | "spanNulls": false, 78 | "stacking": { 79 | "group": "A", 80 | "mode": "none" 81 | }, 82 | "thresholdsStyle": { 83 | "mode": "off" 84 | } 85 | }, 86 | "mappings": [], 87 | "thresholds": { 88 | "mode": "absolute", 89 | "steps": [ 90 | { 91 | "color": "green", 92 | "value": null 93 | }, 94 | { 95 | "color": "red", 96 | "value": 80 97 | } 98 | ] 99 | } 100 | }, 101 | "overrides": [] 102 | }, 103 | "gridPos": { 104 | "h": 8, 105 | "w": 12, 106 | "x": 0, 107 | "y": 0 108 | }, 109 | "id": 3, 110 | "options": { 111 | "legend": { 112 | "calcs": [], 113 | "displayMode": "list", 114 | "placement": "bottom", 115 | "showLegend": true 116 | }, 117 | "tooltip": { 118 | "mode": "single", 119 | "sort": "none" 120 | } 121 | }, 122 | "targets": [ 123 | { 124 | "datasource": { 125 | "type": "prometheus", 126 | "uid": "mimir" 127 | }, 128 | "editorMode": "code", 129 | 
"exemplar": true, 130 | "expr": "sum(rate(traces_spanmetrics_calls_total{span_kind=\"SPAN_KIND_SERVER\",http_target!~\"/debug/pprof.*\",status_code=\"STATUS_CODE_ERROR\",http_target=~\"$endpoint\"}[1m])) by (http_target)", 131 | "legendFormat": "__auto", 132 | "range": true, 133 | "refId": "A" 134 | } 135 | ], 136 | "title": "Error rates", 137 | "type": "timeseries" 138 | }, 139 | { 140 | "datasource": { 141 | "type": "prometheus", 142 | "uid": "mimir" 143 | }, 144 | "fieldConfig": { 145 | "defaults": { 146 | "color": { 147 | "mode": "palette-classic" 148 | }, 149 | "custom": { 150 | "axisCenteredZero": false, 151 | "axisColorMode": "text", 152 | "axisLabel": "", 153 | "axisPlacement": "auto", 154 | "barAlignment": 0, 155 | "drawStyle": "line", 156 | "fillOpacity": 0, 157 | "gradientMode": "none", 158 | "hideFrom": { 159 | "legend": false, 160 | "tooltip": false, 161 | "viz": false 162 | }, 163 | "lineInterpolation": "linear", 164 | "lineWidth": 1, 165 | "pointSize": 5, 166 | "scaleDistribution": { 167 | "type": "linear" 168 | }, 169 | "showPoints": "auto", 170 | "spanNulls": false, 171 | "stacking": { 172 | "group": "A", 173 | "mode": "none" 174 | }, 175 | "thresholdsStyle": { 176 | "mode": "off" 177 | } 178 | }, 179 | "mappings": [], 180 | "thresholds": { 181 | "mode": "absolute", 182 | "steps": [ 183 | { 184 | "color": "green", 185 | "value": null 186 | }, 187 | { 188 | "color": "red", 189 | "value": 80 190 | } 191 | ] 192 | } 193 | }, 194 | "overrides": [] 195 | }, 196 | "gridPos": { 197 | "h": 8, 198 | "w": 12, 199 | "x": 12, 200 | "y": 0 201 | }, 202 | "id": 4, 203 | "options": { 204 | "legend": { 205 | "calcs": [], 206 | "displayMode": "list", 207 | "placement": "bottom", 208 | "showLegend": true 209 | }, 210 | "tooltip": { 211 | "mode": "single", 212 | "sort": "none" 213 | } 214 | }, 215 | "targets": [ 216 | { 217 | "datasource": { 218 | "type": "prometheus", 219 | "uid": "mimir" 220 | }, 221 | "editorMode": "code", 222 | "exemplar": true, 223 | "expr": "histogram_quantile(.99, sum(rate(traces_spanmetrics_latency_bucket{span_kind=\"SPAN_KIND_SERVER\",http_target!~\"/debug/pprof.*\",status_code=\"STATUS_CODE_ERROR\",http_target=~\"$endpoint\"}[1m])) by (http_target, le))", 224 | "legendFormat": "__auto", 225 | "range": true, 226 | "refId": "A" 227 | } 228 | ], 229 | "title": "p99", 230 | "type": "timeseries" 231 | }, 232 | { 233 | "datasource": { 234 | "type": "tempo", 235 | "uid": "tempo" 236 | }, 237 | "description": "", 238 | "fieldConfig": { 239 | "defaults": { 240 | "custom": { 241 | "align": "auto", 242 | "cellOptions": { 243 | "type": "auto" 244 | }, 245 | "inspect": false 246 | }, 247 | "mappings": [], 248 | "thresholds": { 249 | "mode": "absolute", 250 | "steps": [ 251 | { 252 | "color": "green", 253 | "value": null 254 | }, 255 | { 256 | "color": "red", 257 | "value": 80 258 | } 259 | ] 260 | } 261 | }, 262 | "overrides": [] 263 | }, 264 | "gridPos": { 265 | "h": 15, 266 | "w": 24, 267 | "x": 0, 268 | "y": 8 269 | }, 270 | "id": 1, 271 | "options": { 272 | "footer": { 273 | "countRows": false, 274 | "fields": "", 275 | "reducer": [ 276 | "sum" 277 | ], 278 | "show": false 279 | }, 280 | "showHeader": true 281 | }, 282 | "pluginVersion": "9.4.7", 283 | "targets": [ 284 | { 285 | "datasource": { 286 | "type": "tempo", 287 | "uid": "tempo" 288 | }, 289 | "key": "Q-f807f3aa-fde3-4f3d-96e1-ebff01a76cb9-0", 290 | "limit": 20, 291 | "query": "{ span.http.target =~ \"$endpoint\" && kind = server} >> { status = error } | select(span.db.name, span.db.statement)", 292 | "queryType": 
"traceql", 293 | "refId": "A" 294 | } 295 | ], 296 | "title": "Root Cause: Errors", 297 | "type": "table" 298 | } 299 | ], 300 | "refresh": "", 301 | "revision": 1, 302 | "schemaVersion": 38, 303 | "style": "dark", 304 | "tags": [ 305 | "intro-to-mlt", 306 | "mythical-beasts" 307 | ], 308 | "templating": { 309 | "list": [ 310 | { 311 | "allValue": ".*", 312 | "current": { 313 | "selected": true, 314 | "text": "All", 315 | "value": "$__all" 316 | }, 317 | "datasource": { 318 | "type": "prometheus", 319 | "uid": "mimir" 320 | }, 321 | "definition": "label_values(traces_spanmetrics_calls_total{span_kind=\"SPAN_KIND_SERVER\",http_target!~\"/debug/pprof.*\"}, http_target)\n", 322 | "hide": 0, 323 | "includeAll": true, 324 | "multi": false, 325 | "name": "endpoint", 326 | "options": [], 327 | "query": { 328 | "query": "label_values(traces_spanmetrics_calls_total{span_kind=\"SPAN_KIND_SERVER\",http_target!~\"/debug/pprof.*\"}, http_target)\n", 329 | "refId": "StandardVariableQuery" 330 | }, 331 | "refresh": 1, 332 | "regex": "", 333 | "skipUrlSync": false, 334 | "sort": 0, 335 | "type": "query" 336 | } 337 | ] 338 | }, 339 | "time": { 340 | "from": "now-5m", 341 | "to": "now" 342 | }, 343 | "timepicker": {}, 344 | "timezone": "", 345 | "title": "MLT Erroring Endpoints", 346 | "uid": "9UKWKDqVy", 347 | "version": 1, 348 | "weekStart": "" 349 | } 350 | -------------------------------------------------------------------------------- /grafana/definitions/mlt.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "datasource", 8 | "uid": "grafana" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "target": { 15 | "limit": 100, 16 | "matchAny": false, 17 | "tags": [], 18 | "type": "dashboard" 19 | }, 20 | "type": "dashboard" 21 | } 22 | ] 23 | }, 24 | "editable": true, 25 | "fiscalYearStartMonth": 0, 26 | "graphTooltip": 0, 27 | "id": 4, 28 | "links": [ 29 | { 30 | "asDropdown": true, 31 | "icon": "external link", 32 | "includeVars": false, 33 | "keepTime": false, 34 | "tags": [ 35 | "intro-to-mlt" 36 | ], 37 | "targetBlank": false, 38 | "title": "Intro to MLT Dashboards", 39 | "tooltip": "", 40 | "type": "dashboards", 41 | "url": "" 42 | } 43 | ], 44 | "liveNow": false, 45 | "panels": [ 46 | { 47 | "datasource": { 48 | "type": "prometheus", 49 | "uid": "mimir" 50 | }, 51 | "description": "", 52 | "fieldConfig": { 53 | "defaults": { 54 | "color": { 55 | "mode": "palette-classic" 56 | }, 57 | "custom": { 58 | "axisCenteredZero": false, 59 | "axisColorMode": "text", 60 | "axisLabel": "", 61 | "axisPlacement": "auto", 62 | "axisShow": false, 63 | "barAlignment": 0, 64 | "drawStyle": "line", 65 | "fillOpacity": 57, 66 | "gradientMode": "hue", 67 | "hideFrom": { 68 | "legend": false, 69 | "tooltip": false, 70 | "viz": false 71 | }, 72 | "insertNulls": false, 73 | "lineInterpolation": "linear", 74 | "lineStyle": { 75 | "fill": "solid" 76 | }, 77 | "lineWidth": 1, 78 | "pointSize": 7, 79 | "scaleDistribution": { 80 | "type": "linear" 81 | }, 82 | "showPoints": "never", 83 | "spanNulls": false, 84 | "stacking": { 85 | "group": "A", 86 | "mode": "none" 87 | }, 88 | "thresholdsStyle": { 89 | "mode": "off" 90 | } 91 | }, 92 | "links": [], 93 | "mappings": [], 94 | "thresholds": { 95 | "mode": "absolute", 96 | "steps": [ 97 | { 98 | "color": "green", 99 | "value": null 100 | }, 101 | { 102 | "color": "red", 
103 | "value": 80 104 | } 105 | ] 106 | } 107 | }, 108 | "overrides": [] 109 | }, 110 | "gridPos": { 111 | "h": 11, 112 | "w": 16, 113 | "x": 0, 114 | "y": 0 115 | }, 116 | "id": 11, 117 | "options": { 118 | "legend": { 119 | "calcs": [], 120 | "displayMode": "list", 121 | "placement": "bottom", 122 | "showLegend": true 123 | }, 124 | "tooltip": { 125 | "mode": "single", 126 | "sort": "none" 127 | } 128 | }, 129 | "targets": [ 130 | { 131 | "datasource": { 132 | "type": "prometheus", 133 | "uid": "mimir" 134 | }, 135 | "editorMode": "code", 136 | "exemplar": true, 137 | "expr": "(sum by (http_target)(rate(traces_spanmetrics_latency_sum{http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[1m]))) / (sum by (http_target)(rate(traces_spanmetrics_latency_count{http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[1m])))", 138 | "interval": "", 139 | "legendFormat": "", 140 | "range": true, 141 | "refId": "A" 142 | } 143 | ], 144 | "title": "Request Rate", 145 | "type": "timeseries" 146 | }, 147 | { 148 | "datasource": { 149 | "type": "prometheus", 150 | "uid": "mimir" 151 | }, 152 | "fieldConfig": { 153 | "defaults": { 154 | "color": { 155 | "mode": "thresholds" 156 | }, 157 | "mappings": [], 158 | "thresholds": { 159 | "mode": "absolute", 160 | "steps": [ 161 | { 162 | "color": "green", 163 | "value": null 164 | }, 165 | { 166 | "color": "red", 167 | "value": 5 168 | } 169 | ] 170 | } 171 | }, 172 | "overrides": [] 173 | }, 174 | "gridPos": { 175 | "h": 4, 176 | "w": 8, 177 | "x": 16, 178 | "y": 0 179 | }, 180 | "id": 13, 181 | "options": { 182 | "minVizHeight": 75, 183 | "minVizWidth": 75, 184 | "orientation": "auto", 185 | "reduceOptions": { 186 | "calcs": [ 187 | "lastNotNull" 188 | ], 189 | "fields": "", 190 | "values": false 191 | }, 192 | "showThresholdLabels": false, 193 | "showThresholdMarkers": true 194 | }, 195 | "pluginVersion": "10.2.0-133752", 196 | "targets": [ 197 | { 198 | "datasource": { 199 | "type": "prometheus", 200 | "uid": "mimir" 201 | }, 202 | "exemplar": true, 203 | "expr": "(sum(increase(traces_spanmetrics_calls_total{status_code=\"STATUS_CODE_ERROR\",http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[5m]))/sum(increase(traces_spanmetrics_calls_total{status_code!=\"\",http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[5m]))) * 100", 204 | "interval": "", 205 | "legendFormat": "", 206 | "refId": "A" 207 | } 208 | ], 209 | "title": "Overall Error %age", 210 | "type": "gauge" 211 | }, 212 | { 213 | "datasource": { 214 | "type": "prometheus", 215 | "uid": "mimir" 216 | }, 217 | "fieldConfig": { 218 | "defaults": { 219 | "color": { 220 | "mode": "thresholds" 221 | }, 222 | "links": [ 223 | { 224 | "targetBlank": true, 225 | "title": "Traces for Erroring Endpoint", 226 | "url": "/explore?orgId=1&left={\"datasource\":\"tempo\",\"queries\":[{\"refId\":\"A\",\"datasource\":{\"type\":\"tempo\",\"uid\":\"tempo\"},\"queryType\":\"traceql\",\"limit\":20,\"query\":\"{ %2Ehttp%2Etarget = \\\"${__field.labels.http_target}\\\" %26%26 status = error }\"}],\"range\":{\"from\":\"now-1h\",\"to\":\"now\"}}" 227 | } 228 | ], 229 | "mappings": [], 230 | "thresholds": { 231 | "mode": "absolute", 232 | "steps": [ 233 | { 234 | "color": "green", 235 | "value": null 236 | }, 237 | { 238 | "color": "red", 239 | "value": 5 240 | } 241 | ] 242 | } 243 | }, 244 | "overrides": [] 245 | }, 246 | "gridPos": { 247 | "h": 15, 248 | "w": 8, 249 | "x": 16, 250 | "y": 4 251 | }, 252 | "id": 10, 253 | "options": { 254 | 
"colorMode": "value", 255 | "graphMode": "area", 256 | "justifyMode": "auto", 257 | "orientation": "auto", 258 | "reduceOptions": { 259 | "calcs": [ 260 | "lastNotNull" 261 | ], 262 | "fields": "", 263 | "values": false 264 | }, 265 | "textMode": "auto" 266 | }, 267 | "pluginVersion": "10.2.0-133752", 268 | "targets": [ 269 | { 270 | "datasource": { 271 | "type": "prometheus", 272 | "uid": "mimir" 273 | }, 274 | "editorMode": "code", 275 | "exemplar": false, 276 | "expr": "(sum by (http_target)(increase(traces_spanmetrics_calls_total{status_code=\"STATUS_CODE_ERROR\",http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[5m])))/(sum by (http_target)(increase(traces_spanmetrics_calls_total{status_code!=\"\",http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[5m]))) * 100", 277 | "instant": false, 278 | "interval": "", 279 | "legendFormat": "{{http_target}}", 280 | "range": true, 281 | "refId": "A" 282 | } 283 | ], 284 | "title": "Error Percentages by Target", 285 | "type": "stat" 286 | }, 287 | { 288 | "datasource": { 289 | "type": "prometheus", 290 | "uid": "mimir" 291 | }, 292 | "fieldConfig": { 293 | "defaults": { 294 | "color": { 295 | "mode": "thresholds" 296 | }, 297 | "custom": { 298 | "align": "auto", 299 | "cellOptions": { 300 | "type": "auto" 301 | }, 302 | "inspect": false 303 | }, 304 | "links": [ 305 | { 306 | "targetBlank": true, 307 | "title": "Traces for Status Code/Endpoint", 308 | "url": "/explore?orgId=1&left={\"datasource\":\"tempo\",\"queries\":[{\"refId\":\"A\",\"datasource\":{\"type\":\"tempo\",\"uid\":\"tempo\"},\"queryType\":\"traceql\",\"limit\":20,\"query\":\"{ %2Ehttp%2Etarget = \\\"${__data.fields.http_target}\\\" %26%26 %2Ehttp%2Estatus_code = ${__data.fields.http_status_code} }\"}],\"range\":{\"from\":\"now-1h\",\"to\":\"now\"}}" 309 | } 310 | ], 311 | "mappings": [], 312 | "thresholds": { 313 | "mode": "absolute", 314 | "steps": [ 315 | { 316 | "color": "green", 317 | "value": null 318 | }, 319 | { 320 | "color": "red", 321 | "value": 80 322 | } 323 | ] 324 | } 325 | }, 326 | "overrides": [] 327 | }, 328 | "gridPos": { 329 | "h": 8, 330 | "w": 16, 331 | "x": 0, 332 | "y": 11 333 | }, 334 | "id": 15, 335 | "options": { 336 | "cellHeight": "sm", 337 | "footer": { 338 | "countRows": false, 339 | "fields": "", 340 | "reducer": [ 341 | "sum" 342 | ], 343 | "show": false 344 | }, 345 | "showHeader": true 346 | }, 347 | "pluginVersion": "10.2.0-133752", 348 | "targets": [ 349 | { 350 | "datasource": { 351 | "type": "prometheus", 352 | "uid": "mimir" 353 | }, 354 | "editorMode": "code", 355 | "exemplar": false, 356 | "expr": "sum by (http_status_code,http_target,service_version)(increase(traces_spanmetrics_calls_total{http_status_code=~\"${httpStatus}\",http_target=~\"${httpEndpoint}\"}[10m]))", 357 | "format": "table", 358 | "instant": true, 359 | "legendFormat": "__auto", 360 | "range": false, 361 | "refId": "A" 362 | } 363 | ], 364 | "title": "HTTP Status codes by Endpoint", 365 | "transformations": [ 366 | { 367 | "id": "filterFieldsByName", 368 | "options": { 369 | "include": { 370 | "names": [ 371 | "http_status_code", 372 | "http_target", 373 | "Value" 374 | ] 375 | } 376 | } 377 | }, 378 | { 379 | "id": "sortBy", 380 | "options": { 381 | "fields": {}, 382 | "sort": [ 383 | { 384 | "field": "http_status_code" 385 | } 386 | ] 387 | } 388 | } 389 | ], 390 | "type": "table" 391 | }, 392 | { 393 | "datasource": { 394 | "type": "prometheus", 395 | "uid": "mimir" 396 | }, 397 | "description": "", 398 | "fieldConfig": { 399 | 
"defaults": { 400 | "color": { 401 | "mode": "thresholds" 402 | }, 403 | "custom": { 404 | "align": "auto", 405 | "cellOptions": { 406 | "type": "auto" 407 | }, 408 | "filterable": false, 409 | "inspect": false 410 | }, 411 | "links": [ 412 | { 413 | "title": "Traces for Latencies for Method/Endpoint", 414 | "url": "/explore?orgId=1&left={\"datasource\":\"tempo\",\"queries\":[{\"refId\":\"A\",\"datasource\":{\"type\":\"tempo\",\"uid\":\"tempo\"},\"queryType\":\"traceql\",\"limit\":20,\"query\":\"{ %2Ehttp%2Etarget = \\\"${__data.fields.http_target}\\\" %26%26 %2Ehttp%2Emethod = \\\"${__data.fields[\"HTTP Method\"]}\\\" %26%26 %2Eservice%2Eversion = \\\"${__data.fields.service_version}\\\" }\"}],\"range\":{\"from\":\"now-1h\",\"to\":\"now\"}}" 415 | } 416 | ], 417 | "mappings": [], 418 | "thresholds": { 419 | "mode": "absolute", 420 | "steps": [ 421 | { 422 | "color": "green", 423 | "value": null 424 | }, 425 | { 426 | "color": "red", 427 | "value": 65 428 | } 429 | ] 430 | }, 431 | "unit": "s" 432 | }, 433 | "overrides": [ 434 | { 435 | "matcher": { 436 | "id": "byName", 437 | "options": "Value #A" 438 | }, 439 | "properties": [ 440 | { 441 | "id": "custom.cellOptions", 442 | "value": { 443 | "mode": "lcd", 444 | "type": "gauge" 445 | } 446 | } 447 | ] 448 | } 449 | ] 450 | }, 451 | "gridPos": { 452 | "h": 12, 453 | "w": 11, 454 | "x": 0, 455 | "y": 19 456 | }, 457 | "id": 2, 458 | "links": [], 459 | "options": { 460 | "cellHeight": "sm", 461 | "footer": { 462 | "countRows": false, 463 | "fields": "", 464 | "reducer": [ 465 | "sum" 466 | ], 467 | "show": false 468 | }, 469 | "frameIndex": 0, 470 | "showHeader": true, 471 | "sortBy": [ 472 | { 473 | "desc": true, 474 | "displayName": "Average Latency" 475 | } 476 | ] 477 | }, 478 | "pluginVersion": "10.2.0-133752", 479 | "targets": [ 480 | { 481 | "datasource": { 482 | "type": "prometheus", 483 | "uid": "mimir" 484 | }, 485 | "editorMode": "code", 486 | "exemplar": false, 487 | "expr": "topk(10, sum by (http_method,http_target,service_version)(increase(traces_spanmetrics_latency_sum{http_method=~\".+\", http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[1m]) / increase(traces_spanmetrics_latency_count{http_method=~\".+\", http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[1m])))", 488 | "format": "table", 489 | "instant": true, 490 | "interval": "", 491 | "legendFormat": "", 492 | "refId": "A" 493 | } 494 | ], 495 | "title": "Top 10 Highest Endpoint Latencies Over Last Minute", 496 | "transformations": [ 497 | { 498 | "id": "filterFieldsByName", 499 | "options": { 500 | "include": { 501 | "names": [ 502 | "http_method", 503 | "http_target", 504 | "Value", 505 | "service_version" 506 | ] 507 | } 508 | } 509 | }, 510 | { 511 | "id": "organize", 512 | "options": { 513 | "excludeByName": { 514 | "Value": false 515 | }, 516 | "indexByName": {}, 517 | "renameByName": { 518 | "Value": "Latency (ms)", 519 | "Value #A": "Average Latency", 520 | "http_method": "HTTP Method", 521 | "http_target": "Endpoint" 522 | } 523 | } 524 | } 525 | ], 526 | "type": "table" 527 | }, 528 | { 529 | "datasource": { 530 | "type": "prometheus", 531 | "uid": "mimir" 532 | }, 533 | "description": "", 534 | "fieldConfig": { 535 | "defaults": { 536 | "color": { 537 | "mode": "palette-classic" 538 | }, 539 | "custom": { 540 | "axisCenteredZero": false, 541 | "axisColorMode": "text", 542 | "axisLabel": "", 543 | "axisPlacement": "auto", 544 | "axisShow": false, 545 | "barAlignment": 0, 546 | "drawStyle": "line", 547 | 
"fillOpacity": 23, 548 | "gradientMode": "hue", 549 | "hideFrom": { 550 | "legend": false, 551 | "tooltip": false, 552 | "viz": false 553 | }, 554 | "insertNulls": false, 555 | "lineInterpolation": "smooth", 556 | "lineStyle": { 557 | "fill": "solid" 558 | }, 559 | "lineWidth": 1, 560 | "pointSize": 7, 561 | "scaleDistribution": { 562 | "type": "linear" 563 | }, 564 | "showPoints": "auto", 565 | "spanNulls": false, 566 | "stacking": { 567 | "group": "A", 568 | "mode": "none" 569 | }, 570 | "thresholdsStyle": { 571 | "mode": "off" 572 | } 573 | }, 574 | "links": [], 575 | "mappings": [], 576 | "thresholds": { 577 | "mode": "absolute", 578 | "steps": [ 579 | { 580 | "color": "green", 581 | "value": null 582 | }, 583 | { 584 | "color": "red", 585 | "value": 80 586 | } 587 | ] 588 | } 589 | }, 590 | "overrides": [] 591 | }, 592 | "gridPos": { 593 | "h": 12, 594 | "w": 13, 595 | "x": 11, 596 | "y": 19 597 | }, 598 | "id": 4, 599 | "options": { 600 | "legend": { 601 | "calcs": [], 602 | "displayMode": "list", 603 | "placement": "bottom", 604 | "showLegend": true 605 | }, 606 | "tooltip": { 607 | "mode": "single", 608 | "sort": "none" 609 | } 610 | }, 611 | "targets": [ 612 | { 613 | "datasource": { 614 | "type": "prometheus", 615 | "uid": "mimir" 616 | }, 617 | "editorMode": "code", 618 | "exemplar": true, 619 | "expr": "sum by (http_method,http_target)(increase(traces_spanmetrics_latency_sum{http_method=~\".+\", http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[10m]) / increase(traces_spanmetrics_latency_count{http_method=~\".+\", http_target=~\"${httpEndpoint}\",service_version=~\"${serviceVersion}\"}[10m]))", 620 | "interval": "", 621 | "legendFormat": "", 622 | "range": true, 623 | "refId": "A" 624 | } 625 | ], 626 | "title": "All Endpoint Latencies in ms (Last 10 mins)", 627 | "type": "timeseries" 628 | }, 629 | { 630 | "datasource": { 631 | "type": "prometheus", 632 | "uid": "mimir" 633 | }, 634 | "fieldConfig": { 635 | "defaults": { 636 | "color": { 637 | "mode": "palette-classic" 638 | }, 639 | "custom": { 640 | "axisCenteredZero": false, 641 | "axisColorMode": "text", 642 | "axisLabel": "", 643 | "axisPlacement": "auto", 644 | "axisShow": false, 645 | "barAlignment": 0, 646 | "drawStyle": "line", 647 | "fillOpacity": 35, 648 | "gradientMode": "opacity", 649 | "hideFrom": { 650 | "legend": false, 651 | "tooltip": false, 652 | "viz": false 653 | }, 654 | "insertNulls": false, 655 | "lineInterpolation": "smooth", 656 | "lineStyle": { 657 | "fill": "solid" 658 | }, 659 | "lineWidth": 1, 660 | "pointSize": 5, 661 | "scaleDistribution": { 662 | "type": "linear" 663 | }, 664 | "showPoints": "never", 665 | "spanNulls": false, 666 | "stacking": { 667 | "group": "A", 668 | "mode": "none" 669 | }, 670 | "thresholdsStyle": { 671 | "mode": "off" 672 | } 673 | }, 674 | "mappings": [], 675 | "thresholds": { 676 | "mode": "absolute", 677 | "steps": [ 678 | { 679 | "color": "green", 680 | "value": null 681 | }, 682 | { 683 | "color": "red", 684 | "value": 80 685 | } 686 | ] 687 | } 688 | }, 689 | "overrides": [] 690 | }, 691 | "gridPos": { 692 | "h": 10, 693 | "w": 24, 694 | "x": 0, 695 | "y": 31 696 | }, 697 | "id": 8, 698 | "options": { 699 | "legend": { 700 | "calcs": [], 701 | "displayMode": "list", 702 | "placement": "bottom", 703 | "showLegend": true 704 | }, 705 | "tooltip": { 706 | "mode": "single", 707 | "sort": "none" 708 | } 709 | }, 710 | "targets": [ 711 | { 712 | "datasource": { 713 | "type": "prometheus", 714 | "uid": "mimir" 715 | }, 716 | "editorMode": "code", 717 
| "exemplar": true, 718 | "expr": "histogram_quantile(0.95, sum(rate(mythical_request_times_bucket[$__rate_interval])) by (le, beast))", 719 | "interval": "", 720 | "legendFormat": "{{le}}", 721 | "range": true, 722 | "refId": "A" 723 | } 724 | ], 725 | "title": "95th Percentile Response Latencies (ms)", 726 | "type": "timeseries" 727 | }, 728 | { 729 | "datasource": { 730 | "type": "loki", 731 | "uid": "loki" 732 | }, 733 | "gridPos": { 734 | "h": 8, 735 | "w": 24, 736 | "x": 0, 737 | "y": 41 738 | }, 739 | "id": 6, 740 | "options": { 741 | "dedupStrategy": "none", 742 | "enableLogDetails": true, 743 | "prettifyLogMessage": false, 744 | "showCommonLabels": false, 745 | "showLabels": false, 746 | "showTime": true, 747 | "sortOrder": "Descending", 748 | "wrapLogMessage": false 749 | }, 750 | "targets": [ 751 | { 752 | "datasource": { 753 | "type": "loki", 754 | "uid": "loki" 755 | }, 756 | "expr": "{job=\"alloy\"} | logfmt | status=\"Error\"", 757 | "refId": "A" 758 | } 759 | ], 760 | "title": "Autologged Errors", 761 | "type": "logs" 762 | } 763 | ], 764 | "refresh": "5s", 765 | "revision": 1, 766 | "schemaVersion": 38, 767 | "tags": [ 768 | "intro-to-mlt", 769 | "mythical-beasts" 770 | ], 771 | "templating": { 772 | "list": [ 773 | { 774 | "current": { 775 | "selected": true, 776 | "text": [ 777 | "All" 778 | ], 779 | "value": [ 780 | "$__all" 781 | ] 782 | }, 783 | "datasource": { 784 | "type": "tempo", 785 | "uid": "tempo" 786 | }, 787 | "definition": "", 788 | "description": "HTTP Status", 789 | "hide": 0, 790 | "includeAll": true, 791 | "label": "HTTP Status", 792 | "multi": true, 793 | "name": "httpStatus", 794 | "options": [], 795 | "query": { 796 | "label": "http.status_code", 797 | "refId": "TempoDatasourceVariableQueryEditor-VariableQuery", 798 | "type": 1 799 | }, 800 | "refresh": 2, 801 | "regex": "", 802 | "skipUrlSync": false, 803 | "sort": 0, 804 | "type": "query" 805 | }, 806 | { 807 | "current": { 808 | "selected": true, 809 | "text": [ 810 | "All" 811 | ], 812 | "value": [ 813 | "$__all" 814 | ] 815 | }, 816 | "datasource": { 817 | "type": "tempo", 818 | "uid": "tempo" 819 | }, 820 | "definition": "", 821 | "description": "HTTP Endpoint", 822 | "hide": 0, 823 | "includeAll": true, 824 | "label": "HTTP Endpoint", 825 | "multi": true, 826 | "name": "httpEndpoint", 827 | "options": [], 828 | "query": { 829 | "label": "http.target", 830 | "refId": "TempoDatasourceVariableQueryEditor-VariableQuery", 831 | "type": 1 832 | }, 833 | "refresh": 2, 834 | "regex": "/^(\\/beholder|\\/unicorn|\\/manticore|\\/illithid|\\/owlbear).*/", 835 | "skipUrlSync": false, 836 | "sort": 0, 837 | "type": "query" 838 | }, 839 | { 840 | "current": { 841 | "selected": true, 842 | "text": [ 843 | "All" 844 | ], 845 | "value": [ 846 | "$__all" 847 | ] 848 | }, 849 | "datasource": { 850 | "type": "tempo", 851 | "uid": "tempo" 852 | }, 853 | "definition": "", 854 | "description": "Service version", 855 | "hide": 0, 856 | "includeAll": true, 857 | "label": "Service Version", 858 | "multi": true, 859 | "name": "serviceVersion", 860 | "options": [], 861 | "query": { 862 | "label": "service.version", 863 | "refId": "TempoDatasourceVariableQueryEditor-VariableQuery", 864 | "type": 1 865 | }, 866 | "refresh": 2, 867 | "regex": "", 868 | "skipUrlSync": false, 869 | "sort": 0, 870 | "type": "query" 871 | } 872 | ] 873 | }, 874 | "time": { 875 | "from": "now-5m", 876 | "to": "now" 877 | }, 878 | "timepicker": {}, 879 | "timezone": "", 880 | "title": "MLT Dashboard", 881 | "uid": 
"ed4f4709-4d3b-48fd-a311-a036b85dbd5b", 882 | "version": 4, 883 | "weekStart": "" 884 | } 885 | -------------------------------------------------------------------------------- /grafana/definitions/traces-in-dashboards.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "grafana", 8 | "uid": "-- Grafana --" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "type": "dashboard" 15 | } 16 | ] 17 | }, 18 | "editable": true, 19 | "fiscalYearStartMonth": 0, 20 | "graphTooltip": 0, 21 | "id": 4, 22 | "links": [], 23 | "liveNow": false, 24 | "panels": [ 25 | { 26 | "datasource": { 27 | "type": "prometheus", 28 | "uid": "mimir" 29 | }, 30 | "fieldConfig": { 31 | "defaults": { 32 | "color": { 33 | "mode": "palette-classic" 34 | }, 35 | "custom": { 36 | "axisBorderShow": false, 37 | "axisCenteredZero": false, 38 | "axisColorMode": "text", 39 | "axisLabel": "", 40 | "axisPlacement": "auto", 41 | "barAlignment": 0, 42 | "drawStyle": "line", 43 | "fillOpacity": 0, 44 | "gradientMode": "none", 45 | "hideFrom": { 46 | "legend": false, 47 | "tooltip": false, 48 | "viz": false 49 | }, 50 | "insertNulls": false, 51 | "lineInterpolation": "linear", 52 | "lineWidth": 1, 53 | "pointSize": 5, 54 | "scaleDistribution": { 55 | "type": "linear" 56 | }, 57 | "showPoints": "auto", 58 | "spanNulls": false, 59 | "stacking": { 60 | "group": "A", 61 | "mode": "none" 62 | }, 63 | "thresholdsStyle": { 64 | "mode": "off" 65 | } 66 | }, 67 | "mappings": [], 68 | "thresholds": { 69 | "mode": "absolute", 70 | "steps": [ 71 | { 72 | "color": "green", 73 | "value": null 74 | }, 75 | { 76 | "color": "red", 77 | "value": 80 78 | } 79 | ] 80 | }, 81 | "unitScale": true 82 | }, 83 | "overrides": [] 84 | }, 85 | "gridPos": { 86 | "h": 8, 87 | "w": 12, 88 | "x": 0, 89 | "y": 0 90 | }, 91 | "id": 4, 92 | "options": { 93 | "legend": { 94 | "calcs": [], 95 | "displayMode": "list", 96 | "placement": "bottom", 97 | "showLegend": true 98 | }, 99 | "tooltip": { 100 | "mode": "single", 101 | "sort": "none" 102 | } 103 | }, 104 | "targets": [ 105 | { 106 | "datasource": { 107 | "type": "prometheus", 108 | "uid": "mimir" 109 | }, 110 | "editorMode": "code", 111 | "expr": "sum by (http_method)(rate(traces_spanmetrics_calls_total{service_name=\"mythical-server\",http_method=~\"${httpMethod}\"}[1m]))", 112 | "legendFormat": "", 113 | "range": true, 114 | "refId": "A" 115 | } 116 | ], 117 | "title": "Calls total by HTTP Method", 118 | "type": "timeseries" 119 | }, 120 | { 121 | "datasource": { 122 | "type": "loki", 123 | "uid": "loki" 124 | }, 125 | "gridPos": { 126 | "h": 8, 127 | "w": 12, 128 | "x": 12, 129 | "y": 0 130 | }, 131 | "id": 3, 132 | "options": { 133 | "dedupStrategy": "none", 134 | "enableLogDetails": true, 135 | "prettifyLogMessage": false, 136 | "showCommonLabels": false, 137 | "showLabels": false, 138 | "showTime": false, 139 | "sortOrder": "Descending", 140 | "wrapLogMessage": false 141 | }, 142 | "targets": [ 143 | { 144 | "datasource": { 145 | "type": "loki", 146 | "uid": "loki" 147 | }, 148 | "editorMode": "code", 149 | "expr": "{service_name=\"mythical-server\"} |~ \"http.method=${httpMethod}\"", 150 | "queryType": "range", 151 | "refId": "A" 152 | } 153 | ], 154 | "title": "Logs by HTTP Method", 155 | "type": "logs" 156 | }, 157 | { 158 | "datasource": { 159 | "type": "tempo", 160 | "uid": "tempo" 161 | }, 162 | 
"gridPos": { 163 | "h": 11, 164 | "w": 24, 165 | "x": 0, 166 | "y": 8 167 | }, 168 | "id": 1, 169 | "targets": [ 170 | { 171 | "datasource": { 172 | "type": "tempo", 173 | "uid": "tempo" 174 | }, 175 | "filters": [ 176 | { 177 | "id": "c50ad6ea", 178 | "operator": "=", 179 | "scope": "span" 180 | } 181 | ], 182 | "groupBy": [ 183 | { 184 | "id": "aa0005e2", 185 | "scope": "span" 186 | } 187 | ], 188 | "limit": 20, 189 | "query": "${dashTraceID}", 190 | "queryType": "traceql", 191 | "refId": "A", 192 | "tableType": "traces" 193 | } 194 | ], 195 | "title": "Selected Trace", 196 | "type": "traces" 197 | }, 198 | { 199 | "datasource": { 200 | "type": "tempo", 201 | "uid": "tempo" 202 | }, 203 | "fieldConfig": { 204 | "defaults": { 205 | "color": { 206 | "mode": "thresholds" 207 | }, 208 | "custom": { 209 | "align": "auto", 210 | "cellOptions": { 211 | "type": "auto" 212 | }, 213 | "inspect": false 214 | }, 215 | "links": [ 216 | { 217 | "title": "Render trace in panel", 218 | "url": "/d/b550438e-5e9a-4bfa-8d1d-68a0104c09f2/traceql-panels?orgId=1&var-dashTraceID=${__data.fields[\"traceID\"]}" 219 | } 220 | ], 221 | "mappings": [], 222 | "thresholds": { 223 | "mode": "absolute", 224 | "steps": [ 225 | { 226 | "color": "green", 227 | "value": null 228 | }, 229 | { 230 | "color": "red", 231 | "value": 80 232 | } 233 | ] 234 | }, 235 | "unitScale": true 236 | }, 237 | "overrides": [] 238 | }, 239 | "gridPos": { 240 | "h": 10, 241 | "w": 24, 242 | "x": 0, 243 | "y": 19 244 | }, 245 | "id": 2, 246 | "options": { 247 | "cellHeight": "sm", 248 | "footer": { 249 | "countRows": false, 250 | "fields": "", 251 | "reducer": [ 252 | "sum" 253 | ], 254 | "show": false 255 | }, 256 | "showHeader": true 257 | }, 258 | "pluginVersion": "10.3.3", 259 | "targets": [ 260 | { 261 | "datasource": { 262 | "type": "tempo", 263 | "uid": "tempo" 264 | }, 265 | "limit": 20, 266 | "query": "{ .service.name = \"mythical-server\" && .http.method=~\"${httpMethod}\" }", 267 | "queryType": "traceql", 268 | "refId": "A", 269 | "tableType": "traces" 270 | } 271 | ], 272 | "title": "Traces by HTTP Method", 273 | "type": "table" 274 | } 275 | ], 276 | "refresh": "", 277 | "schemaVersion": 39, 278 | "tags": [], 279 | "templating": { 280 | "list": [ 281 | { 282 | "current": { 283 | "selected": false, 284 | "text": "", 285 | "value": "" 286 | }, 287 | "description": "Trace ID to be rendered in dashboard", 288 | "hide": 0, 289 | "includeAll": false, 290 | "label": "Trace ID", 291 | "multi": false, 292 | "name": "dashTraceID", 293 | "options": [], 294 | "query": "", 295 | "queryValue": "", 296 | "skipUrlSync": false, 297 | "type": "custom" 298 | }, 299 | { 300 | "current": { 301 | "selected": true, 302 | "text": [ 303 | "All" 304 | ], 305 | "value": [ 306 | "$__all" 307 | ] 308 | }, 309 | "datasource": { 310 | "type": "tempo", 311 | "uid": "tempo" 312 | }, 313 | "definition": "", 314 | "description": "HTTP Method to run TraceQL for.", 315 | "hide": 0, 316 | "includeAll": true, 317 | "label": "HTTP Method", 318 | "multi": true, 319 | "name": "httpMethod", 320 | "options": [], 321 | "query": { 322 | "label": "http.method", 323 | "refId": "TempoDatasourceVariableQueryEditor-VariableQuery", 324 | "type": 1 325 | }, 326 | "refresh": 1, 327 | "regex": "", 328 | "skipUrlSync": false, 329 | "sort": 0, 330 | "type": "query" 331 | } 332 | ] 333 | }, 334 | "time": { 335 | "from": "now-15m", 336 | "to": "now" 337 | }, 338 | "timepicker": {}, 339 | "timezone": "", 340 | "title": "Traces in Dashboards", 341 | "uid": 
"b550438e-5e9a-4bfa-8d1d-68a0104c09f2", 342 | "version": 1, 343 | "weekStart": "" 344 | } 345 | -------------------------------------------------------------------------------- /grafana/provisioning/dashboards/mlt.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'MLT' 5 | orgId: 1 6 | type: file 7 | disableDeletion: false 8 | editable: false 9 | updateIntervalSeconds: 10 10 | options: 11 | path: /var/lib/grafana/dashboards/ 12 | -------------------------------------------------------------------------------- /grafana/provisioning/datasources/datasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: Loki 5 | type: loki 6 | access: proxy 7 | uid: loki 8 | url: http://loki:3100 9 | jsonData: 10 | derivedFields: 11 | - datasourceUid: tempo 12 | matcherRegex: "^.*?traceI[d|D]=(\\w+).*$" 13 | name: traceId 14 | url: '$${__value.raw}' 15 | 16 | - name: Tempo 17 | type: tempo 18 | access: proxy 19 | uid: tempo 20 | url: http://tempo:3200 21 | jsonData: 22 | nodeGraph: 23 | enabled: true 24 | serviceMap: 25 | datasourceUid: 'Mimir' 26 | tracesToLogs: 27 | datasourceUid: loki 28 | filterByTraceID: false 29 | spanEndTimeShift: "500ms" 30 | spanStartTimeShift: "-500ms" 31 | tags: ['beast'] 32 | correlations: 33 | - targetUID: postgres 34 | label: "Count $$beast in table" 35 | description: '' 36 | config: 37 | type: query 38 | field: tags 39 | target: 40 | editorMode: code 41 | format: table 42 | rawQuery: true 43 | rawSql: "SELECT COUNT(*) FROM $$beast;" 44 | refId: A 45 | sql: 46 | columns: 47 | - parameters: [] 48 | type: function 49 | groupBy: 50 | - property: 51 | type: string 52 | type: groupBy 53 | limit: 50 54 | transformations: 55 | - type: regex 56 | expression: .*{"value":"(.*?)","key":"beast".*} 57 | mapValue: beast 58 | - targetUID: postgres 59 | label: "$$statement" 60 | description: "Runs the found DB statement in the span" 61 | config: 62 | type: query 63 | field: tags 64 | target: 65 | editorMode: code 66 | filters: 67 | - operator: "=" 68 | scope: span 69 | format: table 70 | limit: 20 71 | queryType: traceqlSearch 72 | rawQuery: true 73 | rawSql: "$$statement;" 74 | refId: A 75 | sql: 76 | columns: 77 | - parameters: [] 78 | type: function 79 | groupBy: 80 | - property: 81 | type: string 82 | type: groupBy 83 | limit: 50 84 | transformations: 85 | - type: regex 86 | expression: .*{"value":"(.*?)","key":"db.statement".*} 87 | mapValue: statement 88 | 89 | - name: Mimir 90 | type: prometheus 91 | access: proxy 92 | uid: mimir 93 | url: http://mimir:9009/prometheus 94 | jsonData: 95 | exemplarTraceIdDestinations: 96 | - datasourceUid: tempo 97 | name: traceID 98 | httpMethod: POST 99 | timeInterval: "2s" 100 | 101 | - name: Pyroscope 102 | type: phlare 103 | access: proxy 104 | uid: pyroscope 105 | url: http://pyroscope:4040 106 | jsonData: 107 | backendType: pyroscope 108 | 109 | - name: Postgres 110 | type: postgres 111 | uid: postgres 112 | url: mythical-database:5432 113 | user: postgres 114 | jsonData: 115 | database: postgres 116 | sslmode: disable 117 | postgresVersion: 1400 118 | secureJsonData: 119 | password: mythical 120 | -------------------------------------------------------------------------------- /grafana/provisioning/plugins/loki-explorer-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | apps: 3 | - type: grafana-lokiexplore-app 4 
| orgId: 1 5 | disabled: false 6 | -------------------------------------------------------------------------------- /images/Introduction to MLTP Arch Diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/grafana/intro-to-mltp/14f8df4745b93a932cd2c4bf37e9ff1e8d3ad909/images/Introduction to MLTP Arch Diagram.png -------------------------------------------------------------------------------- /k6/mythical-loadtest.js: -------------------------------------------------------------------------------- 1 | import { check, sleep } from 'k6'; 2 | import http from 'k6/http'; 3 | 4 | // This is the URL we're going to test, in this case the application server. 5 | const url = "http://mythical-server:4000"; 6 | 7 | // An index of endpoints to use, essentially the paths accepted by the server. 8 | const beasts = [ 9 | 'unicorn', 10 | 'manticore', 11 | 'illithid', 12 | 'owlbear', 13 | 'beholder', 14 | ]; 15 | 16 | // The default function is the one that will be run by k6 when it starts. 17 | export default function () { 18 | // Pick a random beast from the list, then create a random, numeric name. 19 | const beast = beasts[Math.floor(Math.random() * beasts.length)]; 20 | const randomName = Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15); 21 | 22 | // First check a POST to the application server, we'll use the result of the POST to ensure all is well. 23 | const resPost = http.post(`${url}/${beast}`, JSON.stringify({ name: randomName }), 24 | { headers: { 'Content-Type': 'application/json' } }); 25 | // We want to ensure that 201s are returned on a POST, and that the latency is sub-300ms. 26 | check(resPost, { 27 | 'POST status was 201': (r) => r.status == 201, 28 | 'POST transaction time below 300ms': (r) => r.timings.duration < 300, 29 | }); 30 | sleep(1); 31 | 32 | // Now we'll ensure we can retrieve the named beast with a GET. 33 | const resGet = http.get(`${url}/${beast}`); 34 | // Ensure that the GET returns a 200, and that the latency is sub-300ms. 35 | check(resGet, { 36 | 'GET status was 200': (r) => r.status == 200, 37 | 'GET transaction time below 300ms': (r) => r.timings.duration < 300, 38 | }); 39 | sleep(1); 40 | 41 | // Finally we'll remove this entry (to leave the service in good condition) by removing the random name. 42 | const resDelete = http.del(`${url}/${beast}`, JSON.stringify({ name: randomName }), 43 | { headers: { 'Content-Type': 'application/json' } }); 44 | // We want to ensure that the application returned a 204 (deletion), and that it was also sub-300ms latency. 
45 | check(resDelete, { 46 | 'DELETE status was 204': (r) => r.status == 204, 47 | 'DELETE transaction time was below 300ms': (r) => r.timings.duration < 300, 48 | }); 49 | sleep(1); 50 | } 51 | -------------------------------------------------------------------------------- /k8s/mythical/mythical-beasts-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | name: mythical-requester 7 | name: mythical-requester 8 | namespace: default 9 | spec: 10 | progressDeadlineSeconds: 600 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | name: mythical-requester 15 | strategy: 16 | rollingUpdate: 17 | maxSurge: 25% 18 | maxUnavailable: 25% 19 | type: RollingUpdate 20 | template: 21 | metadata: 22 | annotations: 23 | prometheus.io.scrape: "true" 24 | labels: 25 | name: mythical-requester 26 | spec: 27 | containers: 28 | - env: 29 | - name: NAMESPACE 30 | value: production 31 | - name: TRACING_COLLECTOR_HOST 32 | value: 33 | - name: TRACING_COLLECTOR_PORT 34 | value: "4317" 35 | #- name: ENDPOINT_TYPE 36 | # value: BORING 37 | - name: AUTHUSER 38 | value: grafanaopsuser 39 | - name: AUTHPASSWORD 40 | value: 41 | - name: OTEL_EXPORTER_OTLP_TRACES_INSECURE 42 | value: "true" 43 | - name: POD_IP 44 | valueFrom: 45 | fieldRef: 46 | apiVersion: v1 47 | fieldPath: status.podIP 48 | - name: OTEL_RESOURCE_ATTRIBUTES 49 | value: ip=$(POD_IP) 50 | image: grafana/intro-to-mltp:mythical-beasts-requester-latest 51 | imagePullPolicy: Always 52 | name: mythical-requester 53 | resources: {} 54 | dnsPolicy: ClusterFirst 55 | restartPolicy: Always 56 | schedulerName: default-scheduler 57 | securityContext: {} 58 | terminationGracePeriodSeconds: 30 59 | --- 60 | apiVersion: apps/v1 61 | kind: Deployment 62 | metadata: 63 | labels: 64 | name: mythical-server 65 | name: mythical-server 66 | namespace: default 67 | spec: 68 | progressDeadlineSeconds: 600 69 | replicas: 3 70 | selector: 71 | matchLabels: 72 | name: mythical-server 73 | strategy: 74 | rollingUpdate: 75 | maxSurge: 25% 76 | maxUnavailable: 25% 77 | type: RollingUpdate 78 | template: 79 | metadata: 80 | annotations: 81 | prometheus.io.scrape: "true" 82 | labels: 83 | name: mythical-server 84 | spec: 85 | containers: 86 | - env: 87 | - name: NAMESPACE 88 | value: production 89 | - name: TRACING_COLLECTOR_HOST 90 | value: 91 | - name: TRACING_COLLECTOR_PORT 92 | value: "4317" 93 | #- name: ENDPOINT_TYPE 94 | # value: BORING 95 | - name: AUTHUSER 96 | value: grafanaopsuser 97 | - name: AUTHPASSWORD 98 | value: 99 | - name: OTEL_EXPORTER_OTLP_TRACES_INSECURE 100 | value: "true" 101 | - name: POD_IP 102 | valueFrom: 103 | fieldRef: 104 | apiVersion: v1 105 | fieldPath: status.podIP 106 | - name: OTEL_RESOURCE_ATTRIBUTES 107 | value: ip=$(POD_IP) 108 | image: grafana/intro-to-mltp:mythical-beasts-server-latest 109 | imagePullPolicy: Always 110 | name: mythical-server 111 | ports: 112 | - containerPort: 4000 113 | protocol: TCP 114 | resources: 115 | limits: 116 | cpu: "0.5" 117 | requests: 118 | cpu: "0.5" 119 | dnsPolicy: ClusterFirst 120 | restartPolicy: Always 121 | schedulerName: default-scheduler 122 | securityContext: {} 123 | terminationGracePeriodSeconds: 30 124 | --- 125 | apiVersion: apps/v1 126 | kind: Deployment 127 | metadata: 128 | labels: 129 | name: mythical-recorder 130 | name: mythical-recorder 131 | namespace: default 132 | spec: 133 | progressDeadlineSeconds: 600 134 | replicas: 1 135 | selector: 136 | matchLabels: 137 
| name: mythical-recorder 138 | strategy: 139 | rollingUpdate: 140 | maxSurge: 25% 141 | maxUnavailable: 25% 142 | type: RollingUpdate 143 | template: 144 | metadata: 145 | annotations: 146 | prometheus.io.scrape: "true" 147 | labels: 148 | name: mythical-recorder 149 | spec: 150 | containers: 151 | - env: 152 | - name: NAMESPACE 153 | value: production 154 | - name: TRACING_COLLECTOR_HOST 155 | value: 156 | - name: TRACING_COLLECTOR_PORT 157 | value: "4317" 158 | - name: OTEL_EXPORTER_OTLP_TRACES_INSECURE 159 | value: "true" 160 | - name: POD_IP 161 | valueFrom: 162 | fieldRef: 163 | apiVersion: v1 164 | fieldPath: status.podIP 165 | - name: OTEL_RESOURCE_ATTRIBUTES 166 | value: ip=$(POD_IP) 167 | image: grafana/intro-to-mltp:mythical-beasts-recorder-latest 168 | imagePullPolicy: Always 169 | name: mythical-recorder 170 | resources: {} 171 | dnsPolicy: ClusterFirst 172 | restartPolicy: Always 173 | schedulerName: default-scheduler 174 | securityContext: {} 175 | terminationGracePeriodSeconds: 30 176 | --- 177 | apiVersion: apps/v1 178 | kind: Deployment 179 | metadata: 180 | labels: 181 | name: mythical-queue 182 | name: mythical-queue 183 | namespace: default 184 | spec: 185 | progressDeadlineSeconds: 600 186 | replicas: 1 187 | selector: 188 | matchLabels: 189 | name: mythical-queue 190 | strategy: 191 | rollingUpdate: 192 | maxSurge: 25% 193 | maxUnavailable: 25% 194 | type: RollingUpdate 195 | template: 196 | metadata: 197 | annotations: 198 | prometheus.io.scrape: "true" 199 | labels: 200 | name: mythical-queue 201 | spec: 202 | containers: 203 | - env: 204 | - name: NAMESPACE 205 | value: production 206 | image: rabbitmq:management 207 | imagePullPolicy: Always 208 | name: mythical-queue 209 | resources: {} 210 | dnsPolicy: ClusterFirst 211 | restartPolicy: Always 212 | schedulerName: default-scheduler 213 | securityContext: {} 214 | terminationGracePeriodSeconds: 30 215 | --- 216 | apiVersion: apps/v1 217 | kind: Deployment 218 | metadata: 219 | labels: 220 | name: mythical-database 221 | name: mythical-database 222 | namespace: default 223 | spec: 224 | replicas: 1 225 | selector: 226 | matchLabels: 227 | name: mythical-database 228 | strategy: 229 | type: Recreate 230 | template: 231 | metadata: 232 | annotations: 233 | prometheus.io.scrape: "true" 234 | labels: 235 | name: mythical-database 236 | spec: 237 | containers: 238 | - env: 239 | - name: POSTGRES_PASSWORD 240 | value: mythical 241 | - name: PGDATA 242 | value: /var/lib/postgresql/data/pgdata 243 | image: postgres:14.5 244 | imagePullPolicy: Always 245 | name: mythical-requester 246 | ports: 247 | - containerPort: 5432 248 | protocol: TCP 249 | resources: 250 | volumeMounts: 251 | - mountPath: /var/lib/postgresql/data 252 | name: mythical-beasts-data 253 | dnsPolicy: ClusterFirst 254 | restartPolicy: Always 255 | schedulerName: default-scheduler 256 | securityContext: {} 257 | terminationGracePeriodSeconds: 30 258 | volumes: 259 | - name: mythical-beasts-data 260 | persistentVolumeClaim: 261 | claimName: mythical-beasts-data 262 | -------------------------------------------------------------------------------- /k8s/mythical/mythical-beasts-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | name: mythical-beasts-data 6 | name: mythical-beasts-data 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | status: {} 14 | 
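A minimal sketch of applying these manifests, assuming kubectl is configured against the target cluster and that the blank environment values in the deployment above (for example TRACING_COLLECTOR_HOST and AUTHPASSWORD) have been filled in first; the claim is applied before the deployment so the database pod can bind it:
kubectl apply -f k8s/mythical/mythical-beasts-persistentvolumeclaim.yaml
kubectl apply -f k8s/mythical/mythical-beasts-deployment.yaml
kubectl apply -f k8s/mythical/mythical-beasts-service.yaml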
-------------------------------------------------------------------------------- /k8s/mythical/mythical-beasts-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | name: mythical-server 7 | name: mythical-server 8 | spec: 9 | ports: 10 | - name: "4000" 11 | port: 4000 12 | targetPort: 4000 13 | selector: 14 | name: mythical-server 15 | type: ClusterIP 16 | status: 17 | loadBalancer: {} 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | labels: 23 | name: mythical-queue 24 | name: mythical-queue 25 | spec: 26 | ports: 27 | - name: "4000" 28 | port: 5672 29 | targetPort: 5672 30 | selector: 31 | name: mythical-queue 32 | type: ClusterIP 33 | status: 34 | loadBalancer: {} 35 | --- 36 | apiVersion: v1 37 | kind: Service 38 | metadata: 39 | labels: 40 | name: mythical-database 41 | name: mythical-database 42 | spec: 43 | ports: 44 | - name: "5432" 45 | port: 5432 46 | targetPort: 5432 47 | selector: 48 | name: mythical-database 49 | status: 50 | loadBalancer: {} 51 | -------------------------------------------------------------------------------- /loki/loki.yaml: -------------------------------------------------------------------------------- 1 | # Disable multi-tenancy, ensuring a single tenant for all log streams. 2 | auth_enabled: false 3 | 4 | # Configuration block for the Loki server. 5 | server: 6 | http_listen_port: 3100 # Listen on port 3100 for all incoming traffic. 7 | log_level: info # Set the log level to info. 8 | 9 | # The limits configuration block allows default global and per-tenant limits to be set (which can be altered in an 10 | # overrides block). In this case, volume usage is be enabled globally (as there is one tenant). 11 | # This is used by the Logs Explorer app in Grafana. 12 | limits_config: 13 | volume_enabled: true 14 | 15 | # The common block is used to set options for all of the components that make up Loki. These can be overridden using 16 | # the specific configuration blocks for each component. 17 | common: 18 | instance_addr: 127.0.0.1 # The address at which this Loki instance can be reached on the local hash ring. 19 | # Loki is running as a single binary, so it's the localhost address. 20 | path_prefix: /loki # Prefix for all HTTP endpoints. 21 | # Configuration of the underlying Loki storage system. 22 | storage: 23 | # Use the local filesystem. In a production environment, you'd use an object store like S3 or GCS. 24 | filesystem: 25 | chunks_directory: /loki/chunks # The FS directory to store the Loki chunks in. 26 | rules_directory: /loki/rules # The FS directory to store the Loki rules in. 27 | replication_factor: 1 # The replication factor (RF) determines how many ingesters will store each chunk. 28 | # In this case, we have one ingester, so the RF is 1, but in a production system 29 | # you'd have multiple ingesters and set the RF to a higher value for resilience. 30 | # The ring configuration block is used to configure the hash ring that all components use to communicate with each other. 31 | ring: 32 | # Use an in-memory ring. In a production environment, you'd use a distributed ring like memberlist, Consul or etcd. 33 | kvstore: 34 | store: inmemory 35 | 36 | # The schema_config block is used to configure the schema that Loki uses to store log data. Loki allows the use of 37 | # multiple schemas based on specific time periods. This allows backwards compatibility on schema changes. 
38 | schema_config: 39 | # Only one config is specified here. 40 | configs: 41 | - from: 2020-10-24 # When the schema applies from. 42 | store: tsdb # Where the schema is stored, in this case using the TSDB store. 43 | object_store: filesystem # As configured in the common block above, the object store is the local filesystem. 44 | schema: v13 # Specify the schema version to use, in this case the latest version (v13). 45 | # The index configuration block is used to configure how indexing tables are created and stored. Index tables 46 | # are the directory that allows Loki to determine which chunks to read when querying for logs. 47 | index: 48 | prefix: index_ # Prefix for all index tables. 49 | period: 24h # The period for which each index table covers. In this case, 24 hours. 50 | 51 | # The ruler configuration block to configure the ruler (for recording rules and alerts) in Loki. 52 | ruler: 53 | alertmanager_url: http://localhost:9093 # The URL of the Alertmanager to send alerts to. Again, this is a single 54 | # binary instance running on the same host, so it's localhost. 55 | 56 | # By default, Loki will send anonymous, but uniquely-identifiable usage and configuration 57 | # analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/ 58 | # 59 | # Statistics help us better understand how Loki is used, and they show us performance 60 | # levels for most users. This helps us prioritize features and documentation. 61 | # For more information on what's sent, look at 62 | # https://github.com/grafana/loki/blob/main/pkg/usagestats/stats.go 63 | # Refer to the buildReport method to see what goes into a report. 64 | # 65 | # If you would like to disable reporting, uncomment the following lines: 66 | #analytics: 67 | # reporting_enabled: false 68 | -------------------------------------------------------------------------------- /mimir/mimir.yaml: -------------------------------------------------------------------------------- 1 | # For more information on this configuration, see the complete reference guide at 2 | # https://grafana.com/docs/mimir/latest/references/configuration-parameters/ 3 | 4 | # Disable multi-tenancy and restrict to single tenant. 5 | multitenancy_enabled: false 6 | 7 | # The block storage configuration determines where the metrics TSDB data is stored. 8 | blocks_storage: 9 | # Use the local filesystem for block storage. 10 | # Note: It is highly recommended not to use local filesystem for production data. 11 | backend: filesystem 12 | # Directory in which to store synchronised TSDB index headers. 13 | bucket_store: 14 | sync_dir: /tmp/mimir/tsdb-sync 15 | # Directory in which to store configuration for object storage. 16 | filesystem: 17 | dir: /tmp/mimir/data/tsdb 18 | # Directory in which to store TSDB WAL data. 19 | tsdb: 20 | dir: /tmp/mimir/tsdb 21 | 22 | # The compactor block configures the compactor responsible for compacting TSDB blocks. 23 | compactor: 24 | # Directory to temporarily store blocks undergoing compaction. 25 | data_dir: /tmp/mimir/compactor 26 | # The sharding ring type used to share the hash ring for the compactor. 27 | sharding_ring: 28 | # Use memberlist backend store (the default). 29 | kvstore: 30 | store: memberlist 31 | 32 | # The distributor receives incoming metrics data for the system. 33 | distributor: 34 | # The ring to share hash ring data across instances. 35 | ring: 36 | # The address advertised in the ring. Localhost. 37 | instance_addr: 127.0.0.1 38 | # Use memberlist backend store (the default).
39 | kvstore: 40 | store: memberlist 41 | 42 | # The ingester receives data from the distributor and processes it into indices and blocks. 43 | ingester: 44 | # The ring to share hash ring data across instances. 45 | ring: 46 | # The address advertised in the ring. Localhost. 47 | instance_addr: 127.0.0.1 48 | # Use memberlist backend store (the default). 49 | kvstore: 50 | store: memberlist 51 | # Only run one instance of the ingesters. 52 | # Note: It is highly recommended to run more than one ingester in production, the default is an RF of 3. 53 | replication_factor: 1 54 | 55 | # The ruler storage block configures ruler storage settings. 56 | ruler_storage: 57 | # Use the local filesystem for block storage. 58 | # Note: It is highly recommended not to use local filesystem for production data. 59 | backend: filesystem 60 | filesystem: 61 | # The directory in which to store rules. 62 | dir: /tmp/mimir/rules 63 | 64 | # The server block configures the Mimir server. 65 | server: 66 | # Listen on port 9009 for all incoming requests. 67 | http_listen_port: 9009 68 | # Log messages at info level. 69 | log_level: info 70 | 71 | # The store gateway block configures gateway storage. 72 | store_gateway: 73 | # Configuration for the hash ring. 74 | sharding_ring: 75 | # Only run a single instance. In production setups, the replication factor must 76 | # be set on the querier and ruler as well. 77 | replication_factor: 1 78 | 79 | # Global limits configuration. 80 | limits: 81 | # A maximum of 100000 exemplars in memory at any one time. 82 | # This setting enables exemplar processing and storage. 83 | max_global_exemplars_per_user: 100000 84 | ingestion_rate: 30000 85 | -------------------------------------------------------------------------------- /otel/otel.yml: -------------------------------------------------------------------------------- 1 | # Define the protocols to receive data for. 2 | # See https://opentelemetry.io/docs/collector/configuration/#receivers 3 | receivers: 4 | # Configure receiving OTLP data via gRPC on port 4317 and HTTP on port 4318. 5 | otlp: 6 | protocols: 7 | grpc: 8 | endpoint: 0.0.0.0:4317 9 | http: 10 | endpoint: 0.0.0.0:4318 11 | 12 | # Defines a Prometheus configuration set. 13 | prometheus: 14 | # Define a set of configurations for scraping by the OpenTelemetry Collector. 15 | config: 16 | # The `scrape_configs` section pertains to the Prometheus `scrape_configs` 17 | # configuration block. 18 | # See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config 19 | scrape_configs: 20 | # Scrape Mimir metrics. 21 | - job_name: 'mimir' 22 | static_configs: 23 | - targets: ['mimir:9009'] 24 | labels: 25 | service: 'mimir' 26 | group: 'infrastructure' 27 | 28 | # Scrape Loki metrics. 29 | - job_name: 'loki' 30 | static_configs: 31 | - targets: ['loki:3100'] 32 | labels: 33 | service: 'loki' 34 | group: 'infrastructure' 35 | 36 | # Scrape Tempo metrics. 37 | - job_name: 'tempo' 38 | static_configs: 39 | - targets: ['tempo:3200'] 40 | labels: 41 | service: 'tempo' 42 | group: 'infrastructure' 43 | 44 | # Scrape Grafana metrics. 45 | - job_name: 'grafana' 46 | static_configs: 47 | - targets: ['grafana:3000'] 48 | labels: 49 | service: 'grafana' 50 | group: 'infrastructure' 51 | 52 | # Scrape from the Mythical Server and Requester service. 
53 | - job_name: 'mythical' 54 | scrape_interval: 2s 55 | static_configs: 56 | - targets: ['mythical-server:4000'] 57 | labels: 58 | service: 'mythical-server' 59 | group: 'mythical' 60 | - targets: ['mythical-requester:4001'] 61 | labels: 62 | service: 'mythical-requester' 63 | group: 'mythical' 64 | 65 | # Scrape from the Beyla Mythical services. 66 | - job_name: 'beyla-infra' 67 | scrape_interval: 15s 68 | static_configs: 69 | - targets: ['beyla-requester:9090'] 70 | labels: 71 | service: 'beyla-requester' 72 | group: 'beyla' 73 | - targets: ['beyla-server:9090'] 74 | labels: 75 | service: 'beyla-server' 76 | group: 'beyla' 77 | - targets: ['beyla-recorder:9090'] 78 | labels: 79 | service: 'beyla-recorder' 80 | group: 'beyla' 81 | 82 | 83 | # Define processors to process received data. 84 | # See https://opentelemetry.io/docs/collector/configuration/#processors 85 | processors: 86 | # Use the in-built `batch` processor to batch up data before writing it for export. 87 | # Use the default values for it. 88 | batch: 89 | 90 | # The tail sampler processor will only keep traces where spans match the defined policies. 91 | tail_sampling: 92 | decision_wait: 30s # The time to wait for a decision to be made. 93 | # The following policies follow a logical OR pattern, meaning that if any of the policies match, 94 | # the trace will be kept. For logical AND, you can use the `and` policy. Every span of a trace is 95 | # examined by each policy in turn. A match will cause a short-circuit. 96 | policies: [ 97 | # This policy defines that traces that include spans that contain errors should be kept. 98 | { 99 | name: sample-erroring-traces, # Name of the policy. 100 | type: status_code, # The type must match the type of policy to be used. 101 | status_code: { status_codes: [ERROR] } # Only sample traces which have a span containing an error. 102 | }, 103 | # This policy defines that traces that are over 200ms should be sampled. 104 | { 105 | name: sample-long-traces, # Name of the policy. 106 | type: latency, # The type must match the type of policy to be used. 107 | latency: { threshold_ms: 200 }, # Only sample traces which are longer than 200ms in duration. 108 | }, 109 | ] 110 | 111 | # The transform processor is used to rename the span metrics to match the Tempo naming convention. 112 | transform: 113 | # Only operate on metric statements. 114 | metric_statements: 115 | # Operate on the metric data. 116 | - context: metric 117 | statements: 118 | # Rename the `traces.spanmetrics.duration` metric to `traces.spanmetrics.latency`. 119 | - set(metric.name, "traces.spanmetrics.latency") where metric.name == "traces.spanmetrics.duration" 120 | # Rename the `traces.spanmetrics.calls` metric to `traces.spanmetrics.calls.total` to pre-suffix name. 121 | - set(metric.name, "traces.spanmetrics.calls.total") where metric.name == "traces.spanmetrics.calls" 122 | 123 | 124 | # Define processors to process received data. 125 | # See https://opentelemetry.io/docs/collector/configuration/#connectors 126 | connectors: 127 | # The spanmetrics connector is used to output span metrics based on received trace spans. 128 | spanmetrics: 129 | namespace: traces.spanmetrics # Prefix all metrics with `traces.spanmetrics` (this becomes `traces_spanmetrics`). 130 | # Explicitly flush metrics every 30 seconds. Note, this will double active series count for the `trace.spanmetrics` 131 | # metric namespace. 132 | metrics_flush_interval: 30s 133 | 134 | # Determine the type of histogram to use for span metrics. 
135 | histogram: 136 | explicit: # Explicit histograms have pre-defined bucket sizes (use default here). 137 | # Defines additional label dimensions of the metrics from trace span attributes present. 138 | dimensions: 139 | - name: http.method 140 | - name: http.target 141 | - name: http.status_code 142 | - name: service.version 143 | # Ensure exemplars are enabled and sent to the metrics store. 144 | exemplars: 145 | enabled: true 146 | 147 | # The servicegraph connector is used to output service node metrics based on received trace spans. 148 | servicegraph: 149 | # Explicitly flush metrics every 60 seconds. Note, this will double active series count for the 150 | # `trace.servicegraph` metric namespace. 151 | metrics_flush_interval: 60s 152 | # Defines which exporter the processor will write metrics to. 153 | metrics_exporter: prometheusremotewrite 154 | # Defines additional label dimensions of the metrics from trace span attributes present. 155 | store: # Configuration for the in-memory store. 156 | ttl: 2s # Time to wait for an edge to be completed. 157 | max_items: 200 # Number of edges that will be stored in the storeMap. 158 | cache_loop: 2m # The timeout used to clean the cache periodically. 159 | store_expiration_loop: 10s # The timeout used to expire old entries from the store periodically. 160 | # Virtual node peer attributes allow server nodes to be generated where instrumentation isn't present (eg. where 161 | # service client calls remotely to a service that does not include instrumentation). 162 | # Service nodes/edges will be generated for any attribute defined. 163 | virtual_node_peer_attributes: 164 | - db.name 165 | 166 | 167 | # Define exporters to data stores. 168 | # See https://opentelemetry.io/docs/collector/configuration/#exporters 169 | # Also see https://github.com/open-telemetry/opentelemetry-collector/tree/main/processor#recommended-processors 170 | exporters: 171 | # Exporter for sending trace data to Tempo. 172 | otlp/grafana: 173 | # Send to the locally running Tempo service. 174 | endpoint: tempo:4317 175 | # TLS is not enabled for the instance. 176 | tls: 177 | insecure: true 178 | 179 | # Exporter for sending Prometheus data to Mimir. 180 | prometheusremotewrite: 181 | # Don't add suffixes to the metrics. We've already renamed the `traces.spanmetrics.calls` metric to 182 | # `traces.spanmetrics.calls.total`, and we don't want to add the `_milliseconds` suffix to the 183 | # `traces.spanmetrics.latency` metric. 184 | add_metric_suffixes: false 185 | # Send to the locally running Mimir service. 186 | endpoint: http://mimir:9009/api/v1/push 187 | # TLS is not enabled for the instance. 188 | tls: 189 | insecure: true 190 | 191 | # Define the full service graph for the OpenTelemetry collector. 192 | service: 193 | # A pipeline can exist for each of the signals received. 194 | pipelines: 195 | # Define the trace pipeline. 196 | traces: 197 | # Receive from the `otlp` receiver. 198 | receivers: [otlp] 199 | # Use the `batch` processor to process received trace spans. 200 | processors: [batch] 201 | # Comment out other `processor` definitions and uncomment the line below to use tail sampling. 202 | #processors: [tail_sampling, batch] 203 | # Export to the `otlp/grafana` exporter. 204 | exporters: [otlp/grafana] 205 | # Comment out other `exporters` definitions and uncomment the line below to generate span metrics 206 | # from within the OpenTelemetry Collector as well as exporting traces to Tempo. 
207 | #exporters: [otlp/grafana, spanmetrics, servicegraph] 208 | 209 | # Define the metrics pipeline. 210 | metrics: 211 | # Receive metrics from the `prometheus` receiver. 212 | receivers: [otlp, prometheus] 213 | # Comment out other `receivers` definitions and uncomment the line below to import spanmetrics as well 214 | # as prometheus metrics. 215 | #receivers: [otlp, prometheus, spanmetrics, servicegraph] 216 | # Use the `batch` processor to process received metrics, and the `transform` processor to ensure that spanmetric 217 | # metrics are in the correct format for Grafana Cloud (doesn't take effect unless the receivers above are used). 218 | processors: [transform, batch] 219 | # Export to the `prometheusremotewrite` exporter. 220 | exporters: [prometheusremotewrite] 221 | -------------------------------------------------------------------------------- /source/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/grafana/intro-to-mltp/14f8df4745b93a932cd2c4bf37e9ff1e8d3ad909/source/.DS_Store -------------------------------------------------------------------------------- /source/build-source.sh: -------------------------------------------------------------------------------- 1 | # Builds Docker images for the Mythical source code. 2 | # This assumes: 3 | # * That an authenticated Docker client is available for the appropriate target registry. 4 | # * That the Docker client is configured to use buildx. 5 | registryName=$1 6 | version=$2 7 | 8 | # Ensure we have both parameters. 9 | if [ -z "$registryName" ] || [ -z "$version" ] 10 | then 11 | echo "Usage: build-source.sh <registryName> <version>" 12 | exit 1 13 | fi 14 | 15 | # Build'n'push 16 | docker buildx build --build-arg SERVICE=mythical-beasts-requester -f ./source/docker/Dockerfile -t ${registryName}:mythical-beasts-requester-${version} -t ${registryName}:mythical-beasts-requester-latest --platform linux/amd64,linux/arm64 --push source/ 17 | docker buildx build --build-arg SERVICE=mythical-beasts-server -f ./source/docker/Dockerfile -t ${registryName}:mythical-beasts-server-${version} -t ${registryName}:mythical-beasts-server-latest --platform linux/amd64,linux/arm64 --push source/ 18 | docker buildx build --build-arg SERVICE=mythical-beasts-recorder -f ./source/docker/Dockerfile -t ${registryName}:mythical-beasts-recorder-${version} -t ${registryName}:mythical-beasts-recorder-latest --platform linux/amd64,linux/arm64 --push source/ 19 | -------------------------------------------------------------------------------- /source/common/endpoints.js: -------------------------------------------------------------------------------- 1 | // Uses the envvar ENDPOINT_TYPE to determine which set 2 | // of names to return.
3 | module.exports = () => { 4 | let nameSet; 5 | let servicePrefix; 6 | let spanTag; 7 | let accumulators; 8 | 9 | if (process.env.ENDPOINT_TYPE === 'BORING') { 10 | nameSet = [ 11 | 'login', 12 | 'payment', 13 | 'account', 14 | 'cart', 15 | 'health', 16 | 'fastcache', 17 | ]; 18 | servicePrefix = 'eStore'; 19 | spanTag = 'endpoint'; 20 | accumulators = [ 21 | 1, 22 | 4, 23 | 5, 24 | 2, 25 | 3, 26 | 6, 27 | ]; 28 | } else { 29 | nameSet = [ 30 | 'unicorn', 31 | 'manticore', 32 | 'illithid', 33 | 'owlbear', 34 | 'beholder', 35 | ]; 36 | servicePrefix = 'mythical'; 37 | spanTag = 'beast'; 38 | accumulators = [ 39 | 1, 40 | 23, 41 | 13, 42 | 32, 43 | 153, 44 | ]; 45 | } 46 | 47 | return { 48 | nameSet, 49 | servicePrefix, 50 | spanTag, 51 | accumulators, 52 | }; 53 | }; 54 | -------------------------------------------------------------------------------- /source/common/logging.js: -------------------------------------------------------------------------------- 1 | const axios = require('axios'); 2 | 3 | module.exports = (serviceName, context) => { 4 | return async (tracingObj) => { 5 | // Tracing 6 | const { api, tracer } = tracingObj; 7 | 8 | const toLokiServer = async (details) => { 9 | const { message, level, job, endpointLabel, endpoint, namespace } = details; 10 | let error = false; 11 | let stream = { 12 | service_name: serviceName, 13 | level, 14 | job, 15 | namespace, 16 | }; 17 | if (endpoint) { 18 | stream[endpointLabel] = endpoint; 19 | } 20 | 21 | try { 22 | await axios.post(process.env.LOGS_TARGET, { 23 | streams: [ 24 | { 25 | stream, 26 | 'values': [ 27 | [ `${Date.now() * 1000000}`, message ] 28 | ] 29 | } 30 | ] 31 | }); 32 | } catch (err) { 33 | console.log(`Logging error: ${err}`); 34 | error = true; 35 | } 36 | 37 | return error; 38 | }; 39 | 40 | // Logging system sends to Loki 41 | const logEntry = async (details) => { 42 | let logSpan; 43 | let error = false; 44 | if (context === 'requester') { 45 | // Create a new span 46 | logSpan = tracer.startSpan("log_to_loki"); 47 | } 48 | 49 | if (process.env.LOGS_TARGET) { 50 | error = await toLokiServer(details); 51 | } else { 52 | console.log(details.message); 53 | } 54 | 55 | if (context === 'requester') { 56 | // Set the status code as OK and end the span 57 | logSpan.setStatus({ code: (!error) ? 
api.SpanStatusCode.OK : api.SpanStatusCode.ERROR }); 58 | logSpan.end(); 59 | } 60 | }; 61 | 62 | return logEntry; 63 | }; 64 | } 65 | -------------------------------------------------------------------------------- /source/common/queue.js: -------------------------------------------------------------------------------- 1 | module.exports = () => { 2 | const queueName = 'messages'; 3 | 4 | return async (tracingObj) => { 5 | const {api, tracer} = tracingObj; 6 | 7 | // import here to ensure it's called after the tracing client has been set up 8 | const amqplib = require('amqplib'); 9 | 10 | const connection = await amqplib.connect('amqp://mythical-queue'); 11 | const channel = await connection.createChannel(); 12 | await channel.assertQueue(queueName); 13 | 14 | const sendMessage = async msg => { 15 | tracer.startActiveSpan('publish_to_queue', async span => { 16 | try { 17 | channel.sendToQueue(queueName, Buffer.from(msg)); 18 | if (msg.match(/(?:\/beholder|\/unicorn)/i)) { 19 | await new Promise(r => setTimeout(r, (Math.random() * 2000) + 500)); 20 | } 21 | span.setStatus({code: api.SpanStatusCode.OK}); 22 | } catch (err) { 23 | console.log(`Error publishing message on the queue: ${err}`); 24 | span.setStatus({code: api.SpanStatusCode.ERROR}); 25 | } 26 | span.end(); 27 | }); 28 | }; 29 | 30 | const consumeMessages = async callback => { 31 | await channel.consume(queueName, async function (msg) { 32 | await callback(msg); 33 | channel.ack(msg); 34 | }); 35 | }; 36 | 37 | return { sendMessage, consumeMessages }; 38 | } 39 | } -------------------------------------------------------------------------------- /source/common/tracing.js: -------------------------------------------------------------------------------- 1 | // This is shared between the requester, the recorder and the server 2 | // As such, to only do what's needed, we init using a 3 | // function and then pass the service context to 4 | // determine what to initialise. 
5 | module.exports = (context, serviceName) => { 6 | // Include all OpenTelemetry dependencies for tracing 7 | const api = require("@opentelemetry/api"); 8 | const { NodeTracerProvider } = require("@opentelemetry/sdk-trace-node"); 9 | const { SimpleSpanProcessor } = require("@opentelemetry/sdk-trace-base"); 10 | const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-grpc'); 11 | const { detectResources, resourceFromAttributes } = require('@opentelemetry/resources'); 12 | const { SEMRESATTRS_SERVICE_NAME } = require('@opentelemetry/semantic-conventions'); 13 | const { registerInstrumentations } = require('@opentelemetry/instrumentation'); 14 | const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node'); 15 | 16 | return async () => { 17 | let W3CTraceContextPropagator; 18 | if (context === 'requester') { 19 | W3CTraceContextPropagator = require("@opentelemetry/core").W3CTraceContextPropagator; 20 | } 21 | 22 | // Detect resources and then merge with the service name 23 | const detected = await detectResources(); 24 | const resources = resourceFromAttributes({ 25 | [SEMRESATTRS_SERVICE_NAME]: serviceName, 26 | }).merge(detected); 27 | 28 | // Export via OTLP gRPC 29 | const exporter = new OTLPTraceExporter({ 30 | url: `${process.env.TRACING_COLLECTOR_HOST}:${process.env.TRACING_COLLECTOR_PORT}` 31 | }); 32 | 33 | 34 | // Use simple span processor (for production code without memory pressure, you should probably use Batch) 35 | const processor = new SimpleSpanProcessor(exporter); 36 | 37 | // Create a tracer provider 38 | const provider = new NodeTracerProvider({ 39 | resource: resources, 40 | spanProcessors: [processor], 41 | }); 42 | provider.register(); 43 | 44 | // Create a new header for propagation from a given span 45 | let createPropagationHeader; 46 | if (context === 'requester') { 47 | const propagator = new W3CTraceContextPropagator(); 48 | createPropagationHeader = (span) => { 49 | let carrier = {}; 50 | // Inject the current trace context into the carrier object 51 | propagator.inject( 52 | api.trace.setSpanContext(api.ROOT_CONTEXT, span.spanContext()), 53 | carrier, 54 | api.defaultTextMapSetter 55 | ); 56 | return carrier; 57 | }; 58 | } 59 | 60 | registerInstrumentations({ 61 | instrumentations: [getNodeAutoInstrumentations()], 62 | }); 63 | 64 | // Return instances of the API and the tracer to the calling app 65 | return { 66 | tracer: api.trace.getTracer(serviceName), 67 | api: api, 68 | propagator: createPropagationHeader, 69 | } 70 | }; 71 | }; 72 | -------------------------------------------------------------------------------- /source/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Multistage build - allows for smaller final images post Python build 2 | FROM node:23-alpine3.20 AS builder 3 | ARG SERVICE 4 | 5 | WORKDIR /usr/src/app 6 | 7 | # Install Python3 - Needed for the pprof NPM module 8 | ENV PYTHONUNBUFFERED=1 9 | RUN apk add --update --no-cache python3 alpine-sdk && ln -sf python3 /usr/bin/python 10 | ## && \ 11 | ## python3 -m ensurepip 12 | # && \ 13 | # pip3 install --no-cache --upgrade pip setuptools 14 | 15 | COPY ${SERVICE}/package.json /usr/src/app/ 16 | 17 | RUN apk update && apk upgrade && \ 18 | apk add --no-cache bash git openssh 19 | 20 | RUN npm install --production \ 21 | && npm cache clean --force \ 22 | && rm -rf /tmp/* 23 | 24 | # Create a slimmer image using the built node_modules 25 | FROM node:23-alpine3.20 26 | ARG SERVICE 27 | 28 | 
WORKDIR /usr/src/app 29 | 30 | COPY --from=builder /usr/src/app/node_modules ./node_modules 31 | COPY --from=builder /usr/src/app/package* ./ 32 | 33 | COPY ${SERVICE}/*.js /usr/src/app/ 34 | COPY common/*.js /usr/src/app/ 35 | 36 | CMD ["npm", "start"] 37 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | /.pnp 4 | .pnp.js 5 | 6 | # Testing 7 | /coverage 8 | 9 | # Production 10 | /build 11 | 12 | # Misc 13 | .DS_Store 14 | .env.local 15 | .env.development.local 16 | .env.test.local 17 | .env.production.local 18 | 19 | # Logs 20 | npm-debug.log* 21 | yarn-debug.log* 22 | yarn-error.log* 23 | 24 | # IDE 25 | .vscode/ 26 | .idea/ 27 | *.swp 28 | *.swo 29 | 30 | # OS 31 | Thumbs.db 32 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | # Build stage 2 | FROM node:18-alpine AS build 3 | 4 | # Accept build arguments 5 | ARG REACT_APP_API_URL 6 | ARG REACT_APP_ALLOY_ENDPOINT 7 | 8 | # Set as environment variables for the build process 9 | ENV REACT_APP_API_URL=$REACT_APP_API_URL 10 | ENV REACT_APP_ALLOY_ENDPOINT=$REACT_APP_ALLOY_ENDPOINT 11 | 12 | WORKDIR /app 13 | 14 | # Copy package files 15 | COPY package*.json ./ 16 | 17 | # Install all dependencies (including dev dependencies needed for build) 18 | RUN npm ci 19 | 20 | # Copy source code 21 | COPY . . 22 | 23 | # Build the React app with rsbuild 24 | RUN npm run build 25 | 26 | # Production stage 27 | FROM nginx:alpine 28 | 29 | # Copy custom nginx config 30 | COPY nginx.conf /etc/nginx/nginx.conf 31 | 32 | # Copy built app from build stage 33 | COPY --from=build /app/build /usr/share/nginx/html 34 | 35 | # Expose port 80 36 | EXPOSE 80 37 | 38 | # Start nginx 39 | CMD ["nginx", "-g", "daemon off;"] 40 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/nginx.conf: -------------------------------------------------------------------------------- 1 | events { 2 | worker_connections 1024; 3 | } 4 | 5 | http { 6 | include /etc/nginx/mime.types; 7 | default_type application/octet-stream; 8 | 9 | # Logging 10 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 11 | '$status $body_bytes_sent "$http_referer" ' 12 | '"$http_user_agent" "$http_x_forwarded_for"'; 13 | 14 | access_log /var/log/nginx/access.log main; 15 | error_log /var/log/nginx/error.log; 16 | 17 | # Gzip compression 18 | gzip on; 19 | gzip_vary on; 20 | gzip_min_length 1024; 21 | gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json; 22 | 23 | # Server configuration 24 | server { 25 | listen 80; 26 | server_name localhost; 27 | root /usr/share/nginx/html; 28 | index index.html; 29 | 30 | # Security headers 31 | add_header X-Frame-Options "SAMEORIGIN" always; 32 | add_header X-Content-Type-Options "nosniff" always; 33 | add_header X-XSS-Protection "1; mode=block" always; 34 | 35 | # Handle React Router (client-side routing) 36 | location / { 37 | try_files $uri $uri/ /index.html; 38 | } 39 | 40 | # Cache static assets 41 | location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ { 42 | expires 1y; 43 | add_header Cache-Control "public, immutable"; 44 | } 45 | 46 | # API proxy to backend - proxy 
API requests to the mythical-server 47 | location /api/ { 48 | # Remove /api prefix and proxy to backend 49 | rewrite ^/api/(.*)$ /$1 break; 50 | proxy_pass http://mythical-server:4000; 51 | proxy_set_header Host $host; 52 | proxy_set_header X-Real-IP $remote_addr; 53 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 54 | proxy_set_header X-Forwarded-Proto $scheme; 55 | 56 | # CORS headers for direct API access via nginx 57 | add_header Access-Control-Allow-Origin "http://localhost:3001" always; 58 | add_header Access-Control-Allow-Methods "GET, POST, DELETE, OPTIONS" always; 59 | add_header Access-Control-Allow-Headers "Content-Type, Authorization" always; 60 | add_header Access-Control-Allow-Credentials "true" always; 61 | 62 | # Handle preflight requests 63 | if ($request_method = 'OPTIONS') { 64 | add_header Access-Control-Allow-Origin "http://localhost:3001"; 65 | add_header Access-Control-Allow-Methods "GET, POST, DELETE, OPTIONS"; 66 | add_header Access-Control-Allow-Headers "Content-Type, Authorization"; 67 | add_header Access-Control-Allow-Credentials "true"; 68 | add_header Content-Length 0; 69 | add_header Content-Type text/plain; 70 | return 204; 71 | } 72 | } 73 | 74 | # Health check endpoint 75 | location /health { 76 | access_log off; 77 | return 200 "healthy\n"; 78 | add_header Content-Type text/plain; 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mythical-beasts-frontend", 3 | "version": "1.0.0", 4 | "private": true, 5 | "type": "module", 6 | "dependencies": { 7 | "@grafana/faro-web-sdk": "^1.4.2", 8 | "@grafana/faro-web-tracing": "^1.4.2", 9 | "axios": "^1.10.0", 10 | "react": "^18.2.0", 11 | "react-dom": "^18.2.0" 12 | }, 13 | "devDependencies": { 14 | "@rsbuild/core": "^1.0.0", 15 | "@rsbuild/plugin-react": "^1.0.0", 16 | "@types/react": "^18.2.0", 17 | "@types/react-dom": "^18.2.0" 18 | }, 19 | "scripts": { 20 | "dev": "rsbuild dev", 21 | "build": "rsbuild build", 22 | "preview": "rsbuild preview" 23 | }, 24 | "browserslist": { 25 | "production": [ 26 | ">0.2%", 27 | "not dead", 28 | "not op_mini all" 29 | ], 30 | "development": [ 31 | "last 1 chrome version", 32 | "last 1 firefox version", 33 | "last 1 safari version" 34 | ] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 12 | 13 | 14 | Mythical Beasts Manager 15 | 27 | 28 | 29 | 30 |
31 | 32 | 33 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "Mythical Beasts", 3 | "name": "Mythical Beasts Frontend", 4 | "description": "Faro Web SDK Demonstration", 5 | "icons": [ 6 | { 7 | "src": "favicon.ico", 8 | "sizes": "64x64 32x32 24x24 16x16", 9 | "type": "image/x-icon" 10 | } 11 | ], 12 | "start_url": ".", 13 | "display": "standalone", 14 | "theme_color": "#667eea", 15 | "background_color": "#ffffff" 16 | } 17 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/rsbuild.config.js: -------------------------------------------------------------------------------- 1 | import { defineConfig } from '@rsbuild/core'; 2 | import { pluginReact } from '@rsbuild/plugin-react'; 3 | 4 | export default defineConfig({ 5 | plugins: [pluginReact()], 6 | html: { 7 | template: './public/index.html', 8 | }, 9 | source: { 10 | entry: { 11 | index: './src/index.js', 12 | }, 13 | }, 14 | output: { 15 | distPath: { 16 | root: 'build', 17 | }, 18 | }, 19 | server: { 20 | port: 3000, 21 | }, 22 | dev: { 23 | hmr: true, 24 | }, 25 | tools: { 26 | htmlPlugin: { 27 | templateParameters: { 28 | PUBLIC_URL: process.env.NODE_ENV === 'production' ? '' : '', 29 | }, 30 | }, 31 | }, 32 | environments: { 33 | web: { 34 | source: { 35 | define: { 36 | 'process.env.REACT_APP_API_URL': JSON.stringify(process.env.REACT_APP_API_URL || '/api'), 37 | 'process.env.REACT_APP_ALLOY_ENDPOINT': JSON.stringify(process.env.REACT_APP_ALLOY_ENDPOINT || 'http://localhost:12350/collect'), 38 | 'process.env.NODE_ENV': JSON.stringify(process.env.NODE_ENV || 'development'), 39 | }, 40 | }, 41 | }, 42 | }, 43 | }); 44 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | text-align: center; 3 | min-height: 100vh; 4 | display: flex; 5 | flex-direction: column; 6 | } 7 | 8 | .App-header { 9 | background: rgba(255, 255, 255, 0.1); 10 | backdrop-filter: blur(10px); 11 | padding: 2rem; 12 | color: white; 13 | text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); 14 | margin-bottom: 2rem; 15 | } 16 | 17 | .App-header h1 { 18 | margin: 0; 19 | font-size: 3rem; 20 | font-weight: 700; 21 | } 22 | 23 | .App-header p { 24 | margin: 0.5rem 0; 25 | font-size: 1.2rem; 26 | opacity: 0.9; 27 | } 28 | 29 | .server-status { 30 | display: flex; 31 | align-items: center; 32 | justify-content: center; 33 | gap: 0.5rem; 34 | margin-top: 1rem; 35 | font-size: 0.9rem; 36 | } 37 | 38 | .status-indicator { 39 | width: 12px; 40 | height: 12px; 41 | border-radius: 50%; 42 | animation: pulse 2s infinite; 43 | } 44 | 45 | .status-indicator.connected { 46 | background-color: #4caf50; 47 | } 48 | 49 | .status-indicator.disconnected { 50 | background-color: #f44336; 51 | } 52 | 53 | .status-indicator.checking { 54 | background-color: #ff9800; 55 | } 56 | 57 | @keyframes pulse { 58 | 0%, 100% { opacity: 1; } 59 | 50% { opacity: 0.5; } 60 | } 61 | 62 | .App-main { 63 | flex: 1; 64 | padding: 0 2rem; 65 | max-width: 1200px; 66 | margin: 0 auto; 67 | width: 100%; 68 | } 69 | 70 | .beast-selector { 71 | margin-bottom: 3rem; 72 | } 73 | 74 | .beast-selector h2 { 75 | color: white; 76 | font-size: 2rem; 77 | margin-bottom: 1.5rem; 78 | text-shadow: 0 2px 4px 
rgba(0, 0, 0, 0.3); 79 | } 80 | 81 | .beast-buttons { 82 | display: flex; 83 | flex-wrap: wrap; 84 | gap: 1rem; 85 | justify-content: center; 86 | margin-bottom: 2rem; 87 | } 88 | 89 | .beast-button { 90 | background: rgba(255, 255, 255, 0.1); 91 | backdrop-filter: blur(10px); 92 | border: 3px solid transparent; 93 | border-radius: 15px; 94 | padding: 1rem 1.5rem; 95 | color: white; 96 | cursor: pointer; 97 | transition: all 0.3s ease; 98 | display: flex; 99 | flex-direction: column; 100 | align-items: center; 101 | gap: 0.5rem; 102 | min-width: 120px; 103 | text-shadow: 0 1px 2px rgba(0, 0, 0, 0.3); 104 | } 105 | 106 | .beast-button:hover { 107 | background: rgba(255, 255, 255, 0.2); 108 | transform: translateY(-2px); 109 | box-shadow: 0 8px 25px rgba(0, 0, 0, 0.2); 110 | } 111 | 112 | .beast-button.active { 113 | background: rgba(255, 255, 255, 0.25); 114 | border-color: currentColor; 115 | transform: scale(1.05); 116 | box-shadow: 0 8px 25px rgba(0, 0, 0, 0.3); 117 | } 118 | 119 | .beast-emoji { 120 | font-size: 2rem; 121 | margin-bottom: 0.25rem; 122 | } 123 | 124 | .beast-name { 125 | font-weight: 600; 126 | font-size: 1rem; 127 | } 128 | 129 | .beast-manager-container { 130 | background: rgba(255, 255, 255, 0.95); 131 | backdrop-filter: blur(10px); 132 | border-radius: 20px; 133 | padding: 2rem; 134 | box-shadow: 0 20px 50px rgba(0, 0, 0, 0.1); 135 | margin-bottom: 2rem; 136 | } 137 | 138 | .App-footer { 139 | background: rgba(255, 255, 255, 0.1); 140 | backdrop-filter: blur(10px); 141 | padding: 1rem; 142 | color: white; 143 | text-shadow: 0 1px 2px rgba(0, 0, 0, 0.3); 144 | margin-top: auto; 145 | } 146 | 147 | .App-footer p { 148 | margin: 0; 149 | opacity: 0.8; 150 | } 151 | 152 | @media (max-width: 768px) { 153 | .App-header h1 { 154 | font-size: 2rem; 155 | } 156 | 157 | .App-header p { 158 | font-size: 1rem; 159 | } 160 | 161 | .beast-buttons { 162 | flex-direction: column; 163 | align-items: center; 164 | } 165 | 166 | .beast-button { 167 | width: 100%; 168 | max-width: 200px; 169 | } 170 | 171 | .App-main { 172 | padding: 0 1rem; 173 | } 174 | 175 | .beast-manager-container { 176 | padding: 1rem; 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/App.js: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect } from 'react'; 2 | import './App.css'; 3 | import BeastManager from './components/BeastManager'; 4 | import { getBeastData } from './services/api'; 5 | 6 | const BEAST_TYPES = [ 7 | { id: 'unicorn', name: 'Unicorn', emoji: '🦄', color: '#ff6b6b' }, 8 | { id: 'manticore', name: 'Manticore', emoji: '🦁', color: '#4ecdc4' }, 9 | { id: 'illithid', name: 'Illithid', emoji: '🐙', color: '#45b7d1' }, 10 | { id: 'owlbear', name: 'Owlbear', emoji: '🦉', color: '#96ceb4' }, 11 | { id: 'beholder', name: 'Beholder', emoji: '👁️', color: '#feca57' }, 12 | ]; 13 | 14 | function App() { 15 | const [selectedBeast, setSelectedBeast] = useState(BEAST_TYPES[0]); 16 | const [serverStatus, setServerStatus] = useState('checking'); 17 | 18 | useEffect(() => { 19 | // Check if server is accessible 20 | const checkServer = async () => { 21 | try { 22 | await getBeastData('unicorn'); 23 | setServerStatus('connected'); 24 | } catch (error) { 25 | setServerStatus('disconnected'); 26 | } 27 | }; 28 | checkServer(); 29 | }, []); 30 | 31 | return ( 32 |
33 |
34 |

🏰 Mythical Beasts Frontend

35 |

Grafana Faro Web SDK Demonstration

36 |
37 | 38 | Server: {serverStatus} 39 |
40 |
41 | 42 |
43 |
44 |

Select a beast type

45 |
46 | {BEAST_TYPES.map((beast) => ( 47 | 56 | ))} 57 |
58 |
59 | 60 |
61 | 65 |
66 |
67 | 68 |
69 |

Built with React ⚛️

70 |
71 |
72 | ); 73 | } 74 | 75 | export default App; 76 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/components/BeastManager.css: -------------------------------------------------------------------------------- 1 | .beast-manager { 2 | max-width: 800px; 3 | margin: 0 auto; 4 | text-align: left; 5 | } 6 | 7 | .beast-manager h2 { 8 | font-size: 2.2rem; 9 | margin-bottom: 2rem; 10 | text-align: center; 11 | font-weight: 600; 12 | } 13 | 14 | .beast-manager.server-disconnected { 15 | text-align: center; 16 | padding: 2rem; 17 | } 18 | 19 | .add-section { 20 | margin-bottom: 2rem; 21 | padding: 1.5rem; 22 | background: #f8f9fa; 23 | border-radius: 12px; 24 | border: 2px solid #e9ecef; 25 | } 26 | 27 | .add-form { 28 | width: 100%; 29 | } 30 | 31 | .input-group { 32 | display: flex; 33 | gap: 0.5rem; 34 | align-items: center; 35 | } 36 | 37 | .name-input { 38 | flex: 1; 39 | padding: 0.75rem 1rem; 40 | border: 2px solid #dee2e6; 41 | border-radius: 8px; 42 | font-size: 1rem; 43 | transition: border-color 0.3s ease; 44 | } 45 | 46 | .name-input:focus { 47 | outline: none; 48 | border-color: #4ecdc4; 49 | box-shadow: 0 0 0 3px rgba(78, 205, 196, 0.1); 50 | } 51 | 52 | .name-input:disabled { 53 | background-color: #f8f9fa; 54 | color: #6c757d; 55 | } 56 | 57 | .random-button { 58 | padding: 0.75rem; 59 | border: 2px solid #6c757d; 60 | border-radius: 8px; 61 | background: white; 62 | cursor: pointer; 63 | font-size: 1.2rem; 64 | transition: all 0.3s ease; 65 | min-width: 48px; 66 | } 67 | 68 | .random-button:hover:not(:disabled) { 69 | background: #f8f9fa; 70 | transform: scale(1.05); 71 | } 72 | 73 | .random-button:disabled { 74 | opacity: 0.5; 75 | cursor: not-allowed; 76 | } 77 | 78 | .add-button { 79 | padding: 0.75rem 1.5rem; 80 | border: none; 81 | border-radius: 8px; 82 | color: white; 83 | font-weight: 600; 84 | cursor: pointer; 85 | transition: all 0.3s ease; 86 | font-size: 1rem; 87 | } 88 | 89 | .add-button:hover:not(:disabled) { 90 | opacity: 0.9; 91 | transform: translateY(-1px); 92 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15); 93 | } 94 | 95 | .add-button:disabled { 96 | opacity: 0.6; 97 | cursor: not-allowed; 98 | transform: none; 99 | } 100 | 101 | .error-message { 102 | background: #ffe6e6; 103 | border: 2px solid #ff9999; 104 | border-radius: 8px; 105 | padding: 1rem; 106 | margin: 1rem 0; 107 | color: #cc0000; 108 | } 109 | 110 | .error-message p { 111 | margin: 0; 112 | } 113 | 114 | .names-section { 115 | margin-top: 2rem; 116 | } 117 | 118 | .section-header { 119 | display: flex; 120 | justify-content: space-between; 121 | align-items: center; 122 | margin-bottom: 1rem; 123 | padding-bottom: 0.5rem; 124 | border-bottom: 2px solid #e9ecef; 125 | } 126 | 127 | .section-header h3 { 128 | margin: 0; 129 | font-size: 1.4rem; 130 | color: #495057; 131 | } 132 | 133 | .refresh-button { 134 | padding: 0.5rem; 135 | border: 2px solid #6c757d; 136 | border-radius: 6px; 137 | background: white; 138 | cursor: pointer; 139 | font-size: 1.2rem; 140 | transition: all 0.3s ease; 141 | min-width: 40px; 142 | } 143 | 144 | .refresh-button:hover:not(:disabled) { 145 | background: #f8f9fa; 146 | transform: rotate(180deg); 147 | } 148 | 149 | .refresh-button:disabled { 150 | opacity: 0.5; 151 | cursor: not-allowed; 152 | } 153 | 154 | .loading { 155 | text-align: center; 156 | padding: 3rem; 157 | color: #6c757d; 158 | } 159 | 160 | .loading-spinner { 161 | width: 40px; 162 | height: 40px; 163 | border: 4px solid #f3f3f3; 164 | 
border-top: 4px solid #4ecdc4; 165 | border-radius: 50%; 166 | animation: spin 1s linear infinite; 167 | margin: 0 auto 1rem; 168 | } 169 | 170 | @keyframes spin { 171 | 0% { transform: rotate(0deg); } 172 | 100% { transform: rotate(360deg); } 173 | } 174 | 175 | .empty-state { 176 | text-align: center; 177 | padding: 3rem; 178 | color: #6c757d; 179 | background: #f8f9fa; 180 | border-radius: 12px; 181 | border: 2px dashed #dee2e6; 182 | } 183 | 184 | .empty-state p { 185 | margin: 0.5rem 0; 186 | } 187 | 188 | .names-grid { 189 | display: grid; 190 | grid-template-columns: repeat(auto-fill, minmax(250px, 1fr)); 191 | gap: 1rem; 192 | margin-top: 1rem; 193 | } 194 | 195 | .name-card { 196 | display: flex; 197 | justify-content: space-between; 198 | align-items: center; 199 | padding: 1rem; 200 | background: white; 201 | border: 2px solid #e9ecef; 202 | border-radius: 8px; 203 | transition: all 0.3s ease; 204 | } 205 | 206 | .name-card:hover { 207 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); 208 | transform: translateY(-2px); 209 | } 210 | 211 | .name-text { 212 | font-weight: 500; 213 | color: #495057; 214 | flex: 1; 215 | margin-right: 0.5rem; 216 | word-break: break-word; 217 | } 218 | 219 | .delete-button { 220 | padding: 0.25rem 0.5rem; 221 | border: 2px solid #dc3545; 222 | border-radius: 4px; 223 | background: white; 224 | color: #dc3545; 225 | cursor: pointer; 226 | font-size: 0.9rem; 227 | transition: all 0.3s ease; 228 | min-width: 30px; 229 | display: flex; 230 | align-items: center; 231 | justify-content: center; 232 | } 233 | 234 | .delete-button:hover:not(:disabled) { 235 | background: #dc3545; 236 | color: white; 237 | transform: scale(1.1); 238 | } 239 | 240 | .delete-button:disabled { 241 | opacity: 0.5; 242 | cursor: not-allowed; 243 | } 244 | 245 | @media (max-width: 768px) { 246 | .beast-manager { 247 | padding: 0; 248 | } 249 | 250 | .input-group { 251 | flex-direction: column; 252 | } 253 | 254 | .name-input { 255 | margin-bottom: 0.5rem; 256 | } 257 | 258 | .random-button, .add-button { 259 | width: 100%; 260 | } 261 | 262 | .section-header { 263 | flex-direction: column; 264 | align-items: flex-start; 265 | gap: 0.5rem; 266 | } 267 | 268 | .names-grid { 269 | grid-template-columns: 1fr; 270 | } 271 | 272 | .name-card { 273 | padding: 0.75rem; 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/components/BeastManager.js: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect } from 'react'; 2 | import './BeastManager.css'; 3 | import { getBeastData, addBeastName, deleteBeastName } from '../services/api'; 4 | 5 | const BeastManager = ({ beast, serverStatus }) => { 6 | const [beastNames, setBeastNames] = useState([]); 7 | const [newName, setNewName] = useState(''); 8 | const [loading, setLoading] = useState(false); 9 | const [error, setError] = useState(null); 10 | const [actionLoading, setActionLoading] = useState(null); 11 | 12 | // Load beast data when beast type changes 13 | useEffect(() => { 14 | if (serverStatus === 'connected') { 15 | loadBeastData(); 16 | } 17 | }, [beast.id, serverStatus]); 18 | 19 | const loadBeastData = async () => { 20 | setLoading(true); 21 | setError(null); 22 | try { 23 | const data = await getBeastData(beast.id); 24 | setBeastNames(data || []); 25 | } catch (err) { 26 | setError(`Failed to load ${beast.name} data. 
Server might be unavailable.`); 27 | setBeastNames([]); 28 | } finally { 29 | setLoading(false); 30 | } 31 | }; 32 | 33 | const handleAddName = async (e) => { 34 | e.preventDefault(); 35 | if (!newName.trim()) return; 36 | 37 | setActionLoading('add'); 38 | setError(null); 39 | try { 40 | await addBeastName(beast.id, newName.trim()); 41 | setNewName(''); 42 | await loadBeastData(); // Refresh the list 43 | } catch (err) { 44 | if (err.response?.status === 500 && err.response?.data?.constraint) { 45 | setError(`"${newName}" already exists in the ${beast.name} collection.`); 46 | } else { 47 | setError(`Failed to add "${newName}". ${err.response?.data || err.message}`); 48 | } 49 | } finally { 50 | setActionLoading(null); 51 | } 52 | }; 53 | 54 | const handleDeleteName = async (nameToDelete) => { 55 | if (!window.confirm(`Are you sure you want to remove "${nameToDelete.name}" from the ${beast.name} collection?`)) { 56 | return; 57 | } 58 | 59 | setActionLoading(`delete-${nameToDelete.id}`); 60 | setError(null); 61 | try { 62 | await deleteBeastName(beast.id, nameToDelete.name); 63 | await loadBeastData(); // Refresh the list 64 | } catch (err) { 65 | setError(`Failed to delete "${nameToDelete.name}". ${err.response?.data || err.message}`); 66 | } finally { 67 | setActionLoading(null); 68 | } 69 | }; 70 | 71 | const generateRandomName = () => { 72 | const prefixes = { 73 | unicorn: ['Sparkle', 'Starlight', 'Crystal', 'Rainbow', 'Silvermane', 'Moonbeam'], 74 | manticore: ['Fierce', 'Shadow', 'Thunder', 'Venom', 'Razorclaw', 'Darkwing'], 75 | illithid: ['Mind', 'Void', 'Psychic', 'Tentacle', 'Brain', 'Psionic'], 76 | owlbear: ['Feather', 'Claw', 'Hoot', 'Forest', 'Wise', 'Night'], 77 | beholder: ['All-seeing', 'Eye', 'Gaze', 'Watcher', 'Orb', 'Vision'] 78 | }; 79 | 80 | const suffixes = ['storm', 'shadow', 'light', 'fang', 'wing', 'heart', 'soul', 'blade', 'fire', 'frost']; 81 | 82 | const beastPrefixes = prefixes[beast.id] || prefixes.unicorn; 83 | const randomPrefix = beastPrefixes[Math.floor(Math.random() * beastPrefixes.length)]; 84 | const randomSuffix = suffixes[Math.floor(Math.random() * suffixes.length)]; 85 | 86 | return `${randomPrefix}${randomSuffix}`; 87 | }; 88 | 89 | if (serverStatus === 'disconnected') { 90 | return ( 91 |
92 |

93 | {beast.emoji} {beast.name} Manager 94 |

95 |
96 |

⚠️ Server is currently unavailable

97 |

Please ensure the mythical-beasts-server is running on port 4000

98 |
99 |
100 | ); 101 | } 102 | 103 | return ( 104 |
105 |

106 | {beast.emoji} {beast.name} Manager 107 |

108 | 109 |
110 |
111 |
112 | setNewName(e.target.value)} 116 | placeholder={`Enter a ${beast.name.toLowerCase()} name...`} 117 | className="name-input" 118 | disabled={loading || actionLoading} 119 | /> 120 | 129 | 137 |
138 |
139 |
140 | 141 | {error && ( 142 |
143 |

{error}

144 |
145 | )} 146 | 147 |
148 |
149 |

Current {beast.name}s ({beastNames.length})

150 | 158 |
159 | 160 | {loading ? ( 161 |
162 |
163 |

Loading {beast.name.toLowerCase()}s...

164 |
165 | ) : beastNames.length === 0 ? ( 166 |
167 |

No {beast.name.toLowerCase()}s found

168 |

Add some names to get started!

169 |
170 | ) : ( 171 |
172 | {beastNames.map((name, index) => ( 173 |
174 | {name.name} 175 | 183 |
184 | ))} 185 |
186 | )} 187 |
188 |
189 | ); 190 | }; 191 | 192 | export default BeastManager; 193 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/faro.js: -------------------------------------------------------------------------------- 1 | import { getWebInstrumentations, initializeFaro } from '@grafana/faro-web-sdk'; 2 | import { TracingInstrumentation } from '@grafana/faro-web-tracing'; 3 | 4 | let faro = null; 5 | 6 | export const initFaro = () => { 7 | // Return early if already initialized 8 | if (faro) { 9 | return faro; 10 | } 11 | 12 | try { 13 | // Get the Alloy endpoint from environment 14 | const alloyEndpoint = process.env.REACT_APP_ALLOY_ENDPOINT || 'http://localhost:12350/collect'; 15 | 16 | console.log('🔍 Initializing Faro with endpoint:', alloyEndpoint); 17 | 18 | // Initialisation with a average configuration. 19 | faro = initializeFaro({ 20 | // This is the endpoint to send telemetry to. In this case, we're sending to Alloy. 21 | url: alloyEndpoint, 22 | // The application details will set relevant resources attributes. 23 | app: { 24 | name: 'mythical-frontend', 25 | version: '1.0.0', 26 | environment: 'development', 27 | }, 28 | 29 | // Basic session tracking 30 | sessionTracking: { 31 | enabled: true, 32 | persistent: false, // Simplified to avoid storage issues 33 | }, 34 | 35 | // Configure batching with longer timeouts to be a bit more forgiving on slower machines. 36 | batching: { 37 | enabled: true, 38 | sendTimeout: 5000, // Longer timeout to prevent connection issues 39 | }, 40 | 41 | // Basic instrumentation. 42 | instrumentations: [ 43 | ...getWebInstrumentations({ 44 | captureConsole: false, // Disable console capture to reduce noisey output. Set to true to capture: 45 | captureConsoleDisabledLevels: ['debug', 'log'], 46 | }), 47 | 48 | // Add tracing instrumentation with simple configuration. 49 | new TracingInstrumentation({ 50 | instrumentationOptions: { 51 | // Only trace API calls to our backend 52 | propagateTraceHeaderCorsUrls: [ 53 | /^\/api\//, // Relative URLs for nginx proxied requests 54 | /^http:\/\/localhost:4000/, // Direct API calls 55 | ], 56 | }, 57 | }), 58 | ], 59 | 60 | // Some basic error handling. 61 | beforeSend: (event) => { 62 | // Simple pass-through with error handling 63 | try { 64 | return event; 65 | } catch (error) { 66 | console.warn('Faro beforeSend error:', error); 67 | return null; 68 | } 69 | }, 70 | }); 71 | 72 | console.log('✅ Faro initialized successfully'); 73 | 74 | // Send a simple test event 75 | setTimeout(() => { 76 | try { 77 | faro.api.pushLog(['Faro SDK test log'], { 78 | level: 'info', 79 | context: { source: 'faro-init' } 80 | }); 81 | } catch (error) { 82 | console.warn('Failed to send test log:', error); 83 | } 84 | }, 1000); 85 | 86 | return faro; 87 | 88 | } catch (error) { 89 | console.warn('⚠️ Failed to initialize Faro SDK:', error); 90 | 91 | // Return a safe mock object to prevent issues 92 | faro = { 93 | api: { 94 | pushLog: () => {}, 95 | pushError: () => {}, 96 | pushMeasurement: (measurement, operation) => { 97 | // If there's an operation function, just call it directly 98 | if (typeof operation === 'function') { 99 | return operation(); 100 | } 101 | return Promise.resolve(); 102 | }, 103 | getTraceContext: () => null, 104 | } 105 | }; 106 | 107 | return faro; 108 | } 109 | }; 110 | 111 | // Shim to export the Faro object. 
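// Illustrative consumer (see services/api.js): other modules call getFaro() and use its api surface, e.g.
//   import { getFaro } from './faro';
//   getFaro().api.pushLog(['something happened'], { level: 'info' });
// The mock object returned on failure keeps calls like this safe even when initialisation did not succeed.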
112 | export const getFaro = () => { 113 | if (!faro) { 114 | return initFaro(); 115 | } 116 | return faro; 117 | }; 118 | 119 | export default { initFaro, getFaro }; 120 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/index.css: -------------------------------------------------------------------------------- 1 | * { 2 | box-sizing: border-box; 3 | } 4 | 5 | body { 6 | margin: 0; 7 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 8 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 9 | sans-serif; 10 | -webkit-font-smoothing: antialiased; 11 | -moz-osx-font-smoothing: grayscale; 12 | background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); 13 | min-height: 100vh; 14 | } 15 | 16 | code { 17 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 18 | monospace; 19 | } 20 | 21 | button { 22 | font-family: inherit; 23 | } 24 | 25 | input { 26 | font-family: inherit; 27 | } 28 | 29 | /* Scrollbar styling */ 30 | ::-webkit-scrollbar { 31 | width: 8px; 32 | } 33 | 34 | ::-webkit-scrollbar-track { 35 | background: rgba(0, 0, 0, 0.1); 36 | border-radius: 4px; 37 | } 38 | 39 | ::-webkit-scrollbar-thumb { 40 | background: rgba(255, 255, 255, 0.3); 41 | border-radius: 4px; 42 | } 43 | 44 | ::-webkit-scrollbar-thumb:hover { 45 | background: rgba(255, 255, 255, 0.5); 46 | } 47 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import './index.css'; 4 | import App from './App'; 5 | import { initFaro } from './faro'; 6 | 7 | // Always initialise Faro first, to ensure that it is available for the React app. 8 | console.log('🔍 Initializing Faro SDK before React app...'); 9 | try { 10 | initFaro(); 11 | console.log('✅ Faro SDK initialized successfully'); 12 | } catch (error) { 13 | console.warn('⚠️ Faro SDK failed to initialize, continuing without observability:', error); 14 | } 15 | 16 | // Render the app! 
17 | const root = ReactDOM.createRoot(document.getElementById('root')); 18 | root.render( 19 | 20 | 21 | 22 | ); 23 | -------------------------------------------------------------------------------- /source/mythical-beasts-frontend/src/services/api.js: -------------------------------------------------------------------------------- 1 | import axios from 'axios'; 2 | import { getFaro } from '../faro'; 3 | 4 | // Base URL for the API - configurable for different environments 5 | // Default tries the nginx proxy first, falls back to direct API access 6 | const getApiBaseUrl = () => { 7 | // Check if we're running in production build 8 | const isProduction = process.env.NODE_ENV === 'production'; 9 | 10 | // Use environment variable if set 11 | if (process.env.REACT_APP_API_URL) { 12 | return process.env.REACT_APP_API_URL; 13 | } 14 | 15 | // In production (Docker), try to use relative path for nginx proxy 16 | if (isProduction && window.location.hostname === 'localhost' && window.location.port === '3001') { 17 | return '/api'; // Use nginx proxy 18 | } 19 | 20 | // Default to direct API access 21 | return 'http://localhost:4000'; 22 | }; 23 | 24 | const API_BASE_URL = getApiBaseUrl(); 25 | 26 | // Create axios instance with default config 27 | const api = axios.create({ 28 | baseURL: API_BASE_URL, 29 | timeout: 10000, 30 | headers: { 31 | 'Content-Type': 'application/json', 32 | }, 33 | }); 34 | 35 | // Request interceptor for logging and trace propagation 36 | api.interceptors.request.use( 37 | (config) => { 38 | console.log(`🚀 API Request: ${config.method?.toUpperCase()} ${config.url}`); 39 | 40 | return config; 41 | }, 42 | (error) => { 43 | console.error('❌ API Request Error:', error); 44 | return Promise.reject(error); 45 | } 46 | ); 47 | 48 | // Response interceptor for error handling and logging 49 | api.interceptors.response.use( 50 | (response) => { 51 | console.log(`✅ API Response: ${response.status} ${response.config.url}`); 52 | return response; 53 | }, 54 | (error) => { 55 | console.error('❌ API Response Error:', error.response?.status, error.message); 56 | 57 | return Promise.reject(error); 58 | } 59 | ); 60 | 61 | /** 62 | * Get all names for a specific beast type 63 | * @param {string} beastType - The type of beast (unicorn, manticore, etc.) 
64 | * @returns {Promise} Array of beast names 65 | */ 66 | export const getBeastData = async (beastType) => { 67 | // Simple operation - Faro automatically instruments this HTTP request 68 | try { 69 | const response = await api.get(`/${beastType}`); 70 | return response.data; 71 | } catch (error) { 72 | console.error(`Error fetching ${beastType} data:`, error); 73 | throw error; 74 | } 75 | }; 76 | 77 | /** 78 | * Add a new name to a specific beast type 79 | * @param {string} beastType - The type of beast 80 | * @param {string} name - The name to add 81 | * @returns {Promise} 82 | */ 83 | export const addBeastName = async (beastType, name) => { 84 | try { 85 | const response = await api.post(`/${beastType}`, { name }); 86 | return response.data; 87 | } catch (error) { 88 | console.error(`Error adding ${name} to ${beastType}:`, error); 89 | throw error; 90 | } 91 | }; 92 | 93 | /** 94 | * Delete a name from a specific beast type 95 | * @param {string} beastType - The type of beast 96 | * @param {string} name - The name to delete 97 | * @returns {Promise} 98 | */ 99 | export const deleteBeastName = async (beastType, name) => { 100 | try { 101 | const response = await api.delete(`/${beastType}`, { 102 | data: { name } 103 | }); 104 | return response.data; 105 | } catch (error) { 106 | console.error(`Error deleting ${name} from ${beastType}:`, error); 107 | throw error; 108 | } 109 | }; 110 | 111 | /** 112 | * Check server health/connectivity 113 | * @returns {Promise} True if server is accessible 114 | */ 115 | export const checkServerHealth = async () => { 116 | try { 117 | // Try to fetch data from a known endpoint 118 | await getBeastData('unicorn'); 119 | return true; 120 | } catch (error) { 121 | return false; 122 | } 123 | }; 124 | 125 | export default api; 126 | -------------------------------------------------------------------------------- /source/mythical-beasts-recorder/index.js: -------------------------------------------------------------------------------- 1 | const tracingUtils = require('./tracing')('recorder', 'mythical-recorder'); 2 | const express = require('express'); 3 | const promClient = require('prom-client'); 4 | const queueUtils = require('./queue')(); 5 | const Pyroscope = require('@pyroscope/nodejs'); 6 | 7 | // Prometheus client registration 8 | const app = express(); 9 | const register = promClient.register; 10 | register.setContentType(promClient.Registry.OPENMETRICS_CONTENT_TYPE); 11 | 12 | // Counter for the number of messages recorded from the queue 13 | const messagesCounter = new promClient.Counter({ 14 | name: 'mythical_messages_recorded', 15 | help: 'The number of messages recorded by the mythical-recorder', 16 | }); 17 | 18 | // Metrics endpoint handler (for Prometheus scraping) 19 | app.get('/metrics', async (req, res) => { 20 | res.set('Content-Type', register.contentType); 21 | res.send(await register.metrics()); 22 | }); 23 | 24 | // Initialise the Pyroscope library to send pprof data. 25 | Pyroscope.init({ 26 | serverAddress: `http://${process.env.PROFILE_COLLECTOR_HOST}:${process.env.PROFILE_COLLECTOR_PORT}`, 27 | appName: 'mythical-recorder', 28 | wall: { 29 | collectCpuTime: true, 30 | }, 31 | tags: { 32 | namespace: `${process.env.NAMESPACE ??
'mythical'}` 33 | }, 34 | }); 35 | Pyroscope.start(); 36 | 37 | const startQueueConsumer = async () => { 38 | const tracingObj = await tracingUtils(); 39 | const { consumeMessages } = await queueUtils( tracingObj ); 40 | 41 | const { tracer } = tracingObj; 42 | 43 | await consumeMessages(async msg => { 44 | tracer.startActiveSpan('process_message', async span => { 45 | messagesCounter.inc(); 46 | 47 | if (msg.content.toString().match(/(?:\/beholder|\/unicorn)/i)) { 48 | await new Promise(r => setTimeout(r, (Math.random() * 1000) + 500)); 49 | } 50 | 51 | if (msg !== null) { 52 | console.log(`Received a message: ${msg.content.toString()}`); 53 | } else { 54 | console.log('Consumer cancelled by server'); 55 | } 56 | 57 | // Pretend to do some work here 58 | const workTime = (Math.random() * 30) + 20; 59 | await new Promise(resolve => setTimeout(resolve, workTime)); 60 | 61 | span.end(); 62 | }); 63 | }); 64 | } 65 | 66 | // Listen to API connections for metrics scraping. 67 | app.listen(4002); 68 | 69 | startQueueConsumer(); 70 | -------------------------------------------------------------------------------- /source/mythical-beasts-recorder/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mythical-beasts-recorder", 3 | "version": "1.0.1", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "node index.js" 9 | }, 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "@opentelemetry/api": "^1.9.0", 14 | "@opentelemetry/auto-instrumentations-node": "^0.60.1", 15 | "@opentelemetry/core": "^2.0.1", 16 | "@opentelemetry/exporter-trace-otlp-grpc": "^0.202.0", 17 | "@opentelemetry/instrumentation": "^0.202.0", 18 | "@opentelemetry/resources": "^2.0.1", 19 | "@opentelemetry/sdk-trace-base": "^2.0.1", 20 | "@opentelemetry/sdk-trace-node": "^2.0.1", 21 | "@opentelemetry/semantic-conventions": "^1.34.0", 22 | "@pyroscope/nodejs": "^0.4.5", 23 | "amqplib": "^0.10.5", 24 | "axios": "^1.10.0", 25 | "express": "^5.1.0", 26 | "prom-client": "github:voltbit/prom-client#add-openmetrics-support" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /source/mythical-beasts-requester/index.js: -------------------------------------------------------------------------------- 1 | const tracingUtils = require('./tracing')('requester', 'mythical-requester'); 2 | const Pyroscope = require('@pyroscope/nodejs'); 3 | const axios = require('axios'); 4 | const { uniqueNamesGenerator, names, colors, animals } = require('unique-names-generator'); 5 | const logUtils = require('./logging')('mythical-requester', 'requester'); 6 | const express = require('express'); 7 | const promClient = require('prom-client'); 8 | const { nameSet, servicePrefix, spanTag, accumulators } = require('./endpoints')(); 9 | const queueUtils = require('./queue')(); 10 | 11 | // Prometheus client registration 12 | const app = express(); 13 | const register = promClient.register; 14 | register.setContentType(promClient.Registry.OPENMETRICS_CONTENT_TYPE); 15 | 16 | let logEntry; 17 | 18 | // What a horrible thing to do, global span context for linking. 19 | // You would not do this in production code, you'd use propagation and baggage. 
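// A minimal sketch of the production-style alternative (not used here): keep the span in the active
// OpenTelemetry context rather than a module-level variable, e.g.
//   api.context.with(api.trace.setSpan(api.context.active(), span), () => { /* downstream work */ });
// and carry request-scoped values with api.propagation.setBaggage()/getBaggage() instead of globals.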
20 | let previousReqSpanContext; 21 | 22 | // Status response bucket (histogram) 23 | const dangerGauge = new promClient.Gauge({ 24 | name: 'mythical_danger_level_30s', 25 | help: 'Recent accumulated danger level over the past 30 seconds', 26 | }); 27 | 28 | // Metrics endpoint handler (for Prometheus scraping) 29 | app.get('/metrics', async (req, res) => { 30 | res.set('Content-Type', register.contentType); 31 | res.send(await register.metrics()); 32 | }); 33 | 34 | // Initialise the Pyroscope library to send pprof data. 35 | Pyroscope.init({ 36 | serverAddress: `http://${process.env.PROFILE_COLLECTOR_HOST}:${process.env.PROFILE_COLLECTOR_PORT}`, 37 | appName: 'mythical-requester', 38 | wall: { 39 | collectCpuTime: true, 40 | }, 41 | tags: { 42 | namespace: `${process.env.NAMESPACE ?? 'mythical'}`, 43 | }, 44 | }); 45 | Pyroscope.start(); 46 | 47 | // We just keep going, requesting names and adding them 48 | const makeRequest = async (tracingObj, sendMessage, logEntry) => { 49 | const { api, tracer, propagator } = tracingObj; 50 | const type = (Math.floor(Math.random() * 100) < 50) ? 'GET' : 'POST'; 51 | const index = Math.floor(Math.random() * nameSet.length); 52 | const endpoint = nameSet[index]; 53 | const dangerLevel = accumulators[index]; 54 | let headers = {}; 55 | let error = false; 56 | 57 | // This method is used to generate a time 40 minutes in the past for the logs. 58 | let timeshift = () => { 59 | var date = (process.env.TIMESHIFT) ? new Date(Date.now() - (1000 * 60 * 40)) : new Date(Date.now()); 60 | return date.toISOString(); 61 | } 62 | 63 | // Create a new span, link to previous request to show how linking between traces works. 64 | const requestSpan = tracer.startSpan('requester', { 65 | kind: api.SpanKind.CLIENT, 66 | links: (previousReqSpanContext) ? [{ context: previousReqSpanContext }] : undefined, 67 | }); 68 | requestSpan.setAttribute(spanTag, endpoint); 69 | requestSpan.setAttribute(`http.target`, '/' + endpoint); 70 | requestSpan.setAttribute(`http.method`, type); 71 | requestSpan.setAttribute('service.version', (Math.floor(Math.random() * 100)) < 50 ? '1.9.2' : '2.0.0'); 72 | previousReqSpanContext = requestSpan.spanContext(); 73 | const { traceId } = requestSpan.spanContext(); 74 | 75 | // Increment the danger level on the gauge 76 | dangerGauge.inc(dangerLevel); 77 | 78 | let serverHostPort = "mythical-server:4000" 79 | // check env var for override 80 | if (process.env.MYTHICAL_SERVER_HOST_PORT) { 81 | serverHostPort = process.env.MYTHICAL_SERVER_HOST_PORT 82 | } 83 | 84 | // Create a new context for this request 85 | api.context.with(api.trace.setSpan(api.context.active(), requestSpan), async () => { 86 | const start = Date.now(); 87 | // Add the headers required for trace propagation 88 | headers = propagator(requestSpan); 89 | 90 | if (type === 'GET') { 91 | let names; 92 | try { 93 | const result = await axios.get(`http://${serverHostPort}/${endpoint}`, { headers }); 94 | sendMessage(`GET /${endpoint}`); 95 | logEntry({ 96 | level: 'info', 97 | namespace: process.env.NAMESPACE, 98 | job: `${servicePrefix}-requester`, 99 | endpointLabel: spanTag, 100 | endpoint, 101 | message: `traceID=${traceId} http.method=GET endpoint=${endpoint} loggedtime=${timeshift()} status=SUCCESS`, 102 | }); 103 | names = result.data; 104 | 105 | // Deletion probability is based on the array index. 
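// Worked example (assuming nameSet holds 5 endpoints): index 0 gives a 0% chance of a
// follow-up DELETE, while index 4 gives (4 / 5) * 100 = 80%.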
106 | let delProb = (index / nameSet.length) * 100; 107 | if (Math.floor(Math.random() * 100) < delProb) { 108 | if (names.length > 0) { 109 | await axios.delete(`http://${serverHostPort}/${endpoint}`, { 110 | data: { name: names[0].name }, 111 | headers: headers 112 | }); 113 | sendMessage(`DELETE /${endpoint} ${names[0].name}`); 114 | logEntry({ 115 | level: 'info', 116 | namespace: process.env.NAMESPACE, 117 | job: `${servicePrefix}-requester`, 118 | endpointLabel: spanTag, 119 | endpoint, 120 | message: `traceID=${traceId} http.method=DELETE endpoint=${endpoint} loggedtime=${timeshift()} status=SUCCESS`, 121 | }); 122 | } 123 | } 124 | } catch (err) { 125 | logEntry({ 126 | level: 'error', 127 | namespace: process.env.NAMESPACE, 128 | job: `${servicePrefix}-requester`, 129 | endpointLabel: spanTag, 130 | endpoint, 131 | message: `traceID=${traceId} http.method=DELETE endpoint=${endpoint} ` + 132 | `name=${(names) ? names[0].name : 'unknown'} status=FAILURE loggedtime=${timeshift()}`, 133 | }); 134 | error = true; 135 | } 136 | } else { 137 | // Generate a new name 138 | const randomName = uniqueNamesGenerator({ dictionaries: [colors, names, animals] }); 139 | const body = { name : randomName }; 140 | try { 141 | await axios.post(`http://${serverHostPort}/${endpoint}`, body, { headers }); 142 | sendMessage(`POST /${endpoint} ${JSON.stringify(body)}`); 143 | logEntry({ 144 | level: 'info', 145 | namespace: process.env.NAMESPACE, 146 | job: `${servicePrefix}-requester`, 147 | endpointLabel: spanTag, 148 | endpoint, 149 | message: `traceID=${traceId} http.method=POST endpoint=${endpoint} loggedtime=${timeshift()} status=SUCCESS`, 150 | }); 151 | } catch (err) { 152 | // The error condition is a little different here to when the 'request' library was used. Axios throws a more 153 | // generic error, which means it's not obvious from these logs what went wrong; you need to look at the 154 | // mythical-server logs to find out. That makes this a better drill-down and triage example than before. 155 | logEntry({ 156 | level: 'error', 157 | namespace: process.env.NAMESPACE, 158 | job: `${servicePrefix}-requester`, 159 | endpointLabel: spanTag, 160 | endpoint, 161 | message: `traceID=${traceId} http.method=POST endpoint=${endpoint} name=${randomName}` + 162 | ` loggedtime=${timeshift()} status=FAILURE`, 163 | }); 164 | error = true; 165 | } 166 | 167 | } 168 | logEntry({ 169 | level: 'info', 170 | namespace: process.env.NAMESPACE, 171 | job: `${servicePrefix}-requester`, 172 | endpointLabel: spanTag, 173 | endpoint, 174 | message: `traceID=${traceId} http.method=${type} endpoint=${endpoint} duration=${Date.now() - start}ms loggedtime=${timeshift()}`, 175 | }); 176 | 177 | // Set the span status (OK on success, ERROR on failure) and end the span 178 | if (error) { 179 | const version = (Math.floor(Math.random() * 100)); 180 | if (version < 70) { 181 | requestSpan.setAttribute('service.version', '2.0.0'); 182 | } 183 | } 184 | requestSpan.setStatus({ code: (!error) ? api.SpanStatusCode.OK : api.SpanStatusCode.ERROR }); 185 | requestSpan.end(); 186 | }); 187 | 188 | // The following awful code creates spikes in the request rate which makes for more interesting graphs 189 | // Joe Elliott did not write this. Do not check the blame.
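// Rough sketch of the cycle: `counter` is shared across all four request loops and wraps at 3000,
// so roughly 2000 iterations use short delays (0-50ms) followed by roughly 1000 iterations with
// longer delays (100-1100ms), which produces the bursty request-rate pattern.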
190 | counter++; 191 | if (counter >= 3000) { 192 | counter = 0; 193 | } 194 | 195 | var nextReqIn; 196 | if (counter < 2000) { 197 | // Use short delays for the first two-thirds of the counter cycle to create a burst of traffic 198 | nextReqIn = Math.floor(Math.random() * 50); 199 | } else { 200 | // Use longer delays for the rest of the cycle 201 | nextReqIn = Math.floor(Math.random() * 1000) + 100; 202 | } 203 | 204 | // Sometime in the next two seconds, but larger than 100ms 205 | //const nextReqIn = (Math.random() * 1000) + 100; 206 | setTimeout(() => makeRequest(tracingObj, sendMessage, logEntry), nextReqIn); 207 | }; 208 | 209 | let counter = 0; 210 | 211 | (async () => { 212 | const tracingObj = await tracingUtils(); 213 | const { sendMessage } = await queueUtils(tracingObj); 214 | logEntry = await logUtils(tracingObj); 215 | 216 | // Kick off four request loops, staggered a second apart 217 | setTimeout(() => makeRequest(tracingObj, sendMessage, logEntry), 5000); 218 | setTimeout(() => makeRequest(tracingObj, sendMessage, logEntry), 6000); 219 | setTimeout(() => makeRequest(tracingObj, sendMessage, logEntry), 7000); 220 | setTimeout(() => makeRequest(tracingObj, sendMessage, logEntry), 8000); 221 | 222 | // Ensure the danger gauge gets reset every 30 seconds 223 | setInterval(() => { 224 | dangerGauge.set(0); 225 | }, 30000); 226 | 227 | // Listen to API connections for metrics scraping. 228 | app.listen(4001); 229 | })(); 230 | -------------------------------------------------------------------------------- /source/mythical-beasts-requester/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "httprequester", 3 | "version": "1.0.1", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "node index.js" 9 | }, 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "@opentelemetry/api": "^1.9.0", 14 | "@opentelemetry/auto-instrumentations-node": "^0.60.1", 15 | "@opentelemetry/core": "^2.0.1", 16 | "@opentelemetry/exporter-trace-otlp-grpc": "^0.202.0", 17 | "@opentelemetry/instrumentation": "^0.202.0", 18 | "@opentelemetry/resources": "^2.0.1", 19 | "@opentelemetry/sdk-trace-base": "^2.0.1", 20 | "@opentelemetry/sdk-trace-node": "^2.0.1", 21 | "@opentelemetry/semantic-conventions": "^1.34.0", 22 | "@pyroscope/nodejs": "^0.4.5", 23 | "amqplib": "^0.10.5", 24 | "axios": "^1.10.0", 25 | "express": "^5.1.0", 26 | "package.json": "^0.0.0", 27 | "prom-client": "github:voltbit/prom-client#add-openmetrics-support", 28 | "unique-names-generator": "^4.7.1" 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /source/mythical-beasts-server/index.js: -------------------------------------------------------------------------------- 1 | const traceUtils = require('./tracing')('server', 'mythical-server'); 2 | const Pyroscope = require('@pyroscope/nodejs'); 3 | const logUtils = require('./logging')('mythical-server', 'server'); 4 | 5 | (async () => { 6 | const traceObj = await traceUtils(); 7 | const logEntry = await logUtils(traceObj); 8 | const { tracer, api } = traceObj; 9 | 10 | const promClient = require('prom-client'); 11 | const express = require('express'); 12 | const bodyParser = require('body-parser'); 13 | const cors = require('cors'); 14 | const { Client } = require('pg'); 15 | const { nameSet, servicePrefix, spanTag } = require('./endpoints')(); 16 | 17 | // Prometheus client registration 18 | const app = express();
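// The OpenMetrics content type set just below is what allows exemplars (trace and span IDs attached
// to histogram observations) to be exposed on /metrics; the classic Prometheus text format omits them.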
19 | const register = promClient.register; 20 | register.setContentType(promClient.Registry.OPENMETRICS_CONTENT_TYPE); 21 | 22 | // Database full teardown timeout 23 | let teardownTimeoutValue = 24 * 60 * 60 * 1000; // Default is every 24 hours (86400000) 24 | if (process.env.MYTHICAL_SERVER_DATABASE_TEARDOWN_TIMEOUT) { 25 | if (Number(process.env.MYTHICAL_SERVER_DATABASE_TEARDOWN_TIMEOUT) > 1 && Number(process.env.MYTHICAL_SERVER_DATABASE_TEARDOWN_TIMEOUT) <= 86400000) { 26 | teardownTimeoutValue = Number(process.env.MYTHICAL_SERVER_DATABASE_TEARDOWN_TIMEOUT) 27 | } 28 | } 29 | const teardownTimeout = teardownTimeoutValue 30 | let teardownInProgress = false; 31 | 32 | // Use JSON parsing in the request body 33 | app.use(bodyParser.json()); 34 | 35 | // Enable CORS for frontend access 36 | app.use(cors({ 37 | origin: [ 38 | 'http://localhost:3000', // Development React server 39 | 'http://localhost:3001', // Production frontend container 40 | //'http://127.0.0.1:3000', 41 | //'http://127.0.0.1:3001' 42 | ], 43 | credentials: true, 44 | methods: ['GET', 'POST', 'DELETE', 'OPTIONS'], 45 | allowedHeaders: ['Content-Type', 'Authorization'] 46 | })); 47 | 48 | let pgClient; 49 | 50 | // Database actions 51 | const Database = { 52 | GET: 0, 53 | POST: 1, 54 | DELETE: 2, 55 | DROP: 3, 56 | CREATE: 4, 57 | }; 58 | 59 | // Status response bucket (histogram) 60 | const responseBucket = new promClient.Histogram({ 61 | name: 'mythical_request_times', 62 | help: 'Response times for the endpoints', 63 | labelNames: ['method', 'status', spanTag], 64 | buckets: [10, 20, 50, 100, 200, 500, 1000, 2000, 4000, 8000, 16000], 65 | enableExemplars: true, 66 | }); 67 | 68 | // Database action function 69 | const databaseAction = async (action) => { 70 | // Which action? 71 | const span = api.trace.getSpan(api.context.active()); 72 | span.setAttribute('span.kind', api.SpanKind.CLIENT); 73 | if (action.method === Database.GET) { 74 | const results = await pgClient.query(`SELECT name from ${action.table}`); 75 | return results.rows; 76 | } else if (action.method === Database.POST) { 77 | return await pgClient.query(`INSERT INTO ${action.table}(name) VALUES ($1)`, [ action.name ]); 78 | } else if (action.method === Database.DELETE) { 79 | return await pgClient.query(`DELETE FROM ${action.table} WHERE name = $1`, [ action.name ]); 80 | } else if (action.method == Database.DROP) { 81 | const traceId = api.trace.getSpan(api.context.active()).spanContext(); 82 | for (const table of nameSet) { 83 | await pgClient.query(`DROP TABLE ${table}`); 84 | logEntry({ 85 | level: 'info', 86 | namespace: process.env.NAMESPACE, 87 | job: `${servicePrefix}-server`, 88 | message: `traceId=${traceId} message="Dropped table ${table}..."`, 89 | }); 90 | } 91 | return; 92 | } else if (action.method === Database.CREATE) { 93 | const traceId = api.trace.getSpan(api.context.active()).spanContext(); 94 | for (const table of nameSet) { 95 | await pgClient.query(`CREATE TABLE IF NOT EXISTS ${table}(id serial PRIMARY KEY, name VARCHAR (50) UNIQUE NOT NULL);`); 96 | logEntry({ 97 | level: 'info', 98 | namespace: process.env.NAMESPACE, 99 | job: `${servicePrefix}-server`, 100 | message: `traceId=${traceId} message="Created table ${table}..."`, 101 | }); 102 | } 103 | return; 104 | } 105 | 106 | logEntry({ 107 | level: 'error', 108 | namespace: process.env.NAMESPACE, 109 | job: `${servicePrefix}-server`, 110 | message: 'message="Method was not valid, throwing error"', 111 | }); 112 | throw new Error(`Not a valid ${spanTag} method!`); 113 | } 114 | 
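// Illustrative call, as used by the GET handler below ('unicorn' stands in for any endpoint in nameSet):
//   const rows = await databaseAction({ method: Database.GET, table: 'unicorn' });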
115 | // Response time bucket function (adds a Prometheus value) 116 | const responseMetric = (details) => { 117 | const timeMs = Date.now() - details.start; 118 | const spanContext = api.trace.getSpan(api.context.active()).spanContext(); 119 | responseBucket.observe({ 120 | labels: details.labels, 121 | value: timeMs, 122 | exemplarLabels: { 123 | traceID: spanContext.traceId, 124 | spanID: spanContext.spanId, 125 | }, 126 | }); 127 | }; 128 | 129 | // Metrics endpoint handler (for Prometheus scraping) 130 | app.get('/metrics', async (req, res) => { 131 | res.set('Content-Type', register.contentType); 132 | res.send(await register.metrics()); 133 | }); 134 | 135 | // Initialise the Pyroscope library to send pprof data. 136 | Pyroscope.init({ 137 | serverAddress: `http://${process.env.PROFILE_COLLECTOR_HOST}:${process.env.PROFILE_COLLECTOR_PORT}`, 138 | appName: 'mythical-server', 139 | wall: { 140 | collectCpuTime: true, 141 | }, 142 | tags: { 143 | namespace: `${process.env.NAMESPACE ?? 'mythical'}` 144 | }, 145 | }); 146 | Pyroscope.start(); 147 | 148 | // Generic GET endpoint 149 | app.get('/:endpoint', async (req, res) => { 150 | const endpoint = req.params.endpoint; 151 | const currentSpan = api.trace.getSpan(api.context.active()); 152 | const spanContext = currentSpan.spanContext(); 153 | const traceId = spanContext.traceId; 154 | 155 | currentSpan.setAttribute(spanTag, endpoint); 156 | 157 | let metricBody = { 158 | labels: { 159 | method: 'GET', 160 | }, 161 | start: Date.now(), 162 | }; 163 | metricBody.labels[spanTag] = endpoint; 164 | 165 | if (!nameSet.includes(endpoint)) { 166 | res.status(404).send(`${endpoint} is not a valid endpoint`); 167 | metricBody.labels.status = '404'; 168 | responseMetric(metricBody); 169 | return; 170 | } 171 | 172 | // If we're in the middle of a teardown, don't do anything 173 | if (teardownCheck({ 174 | spanContext, 175 | endpoint, 176 | method: 'GET', 177 | res, 178 | }) === true) { 179 | return; 180 | } 181 | 182 | // Retrieve all the names 183 | try { 184 | const results = await databaseAction({ 185 | method: Database.GET, 186 | table: endpoint, 187 | }); 188 | 189 | // Metrics 190 | metricBody.labels.status = '200'; 191 | responseMetric(metricBody); 192 | 193 | logEntry({ 194 | level: 'info', 195 | namespace: process.env.NAMESPACE, 196 | job: `${servicePrefix}-server`, 197 | endpointLabel: spanTag, 198 | endpoint, 199 | message: `traceID=${traceId} http.method=GET endpoint=${endpoint} status=SUCCESS`, 200 | }); 201 | 202 | res.send(results); 203 | } catch (err) { 204 | metricBody.labels.status = '500'; 205 | responseMetric(metricBody); 206 | 207 | logEntry({ 208 | level: 'error', 209 | namespace: process.env.NAMESPACE, 210 | job: `${servicePrefix}-server`, 211 | endpointLabel: spanTag, 212 | endpoint, 213 | message: `traceID=${traceId} http.method=GET endpoint=${endpoint} status=FAILURE error="${err}"`, 214 | }); 215 | 216 | res.status(500).send(err); 217 | } 218 | }); 219 | 220 | // Generic POST endpoint 221 | app.post('/:endpoint', async (req, res) => { 222 | const endpoint = req.params.endpoint; 223 | const currentSpan = api.trace.getSpan(api.context.active()); 224 | const spanContext = currentSpan.spanContext(); 225 | const traceId = spanContext.traceId; 226 | 227 | let metricBody = { 228 | labels: { 229 | method: 'POST', 230 | }, 231 | start: Date.now(), 232 | }; 233 | metricBody.labels[spanTag] = endpoint; 234 | 235 | if (!nameSet.includes(endpoint)) { 236 | res.status(404).send(`${endpoint} is not a valid endpoint`); 237 | 
metricBody.labels.status = '404'; 238 | responseMetric(metricBody); 239 | return; 240 | } 241 | 242 | if (!req.body || !req.body.name) { 243 | // Here we'd use 'respondToCall()' which would POST a metric for the response 244 | // code 245 | metricBody.labels.status = '400'; 246 | responseMetric(metricBody); 247 | } 248 | 249 | // If we're in the middle of a teardown, don't do anything 250 | if (teardownCheck({ 251 | spanContext, 252 | endpoint, 253 | method: 'POST', 254 | res, 255 | }) === true) { 256 | return; 257 | } 258 | // POST a new unicorn name 259 | try { 260 | let name = req.body.name 261 | if (process.env.ALWAYS_SUCCEED != "true" && Math.random() < 0.1) { 262 | name = null 263 | } 264 | 265 | await databaseAction({ 266 | method: Database.POST, 267 | table: endpoint, 268 | name: name, 269 | }); 270 | 271 | // Metrics 272 | metricBody.labels.status = '201'; 273 | responseMetric(metricBody); 274 | 275 | logEntry({ 276 | level: 'info', 277 | namespace: process.env.NAMESPACE, 278 | job: `${servicePrefix}-server`, 279 | endpointLabel: spanTag, 280 | endpoint, 281 | message: `traceID=${traceId} http.method=POST endpoint=${endpoint} status=SUCCESS`, 282 | }); 283 | 284 | res.sendStatus(201); 285 | } catch (err) { 286 | // Metrics 287 | metricBody.labels.status = '500'; 288 | responseMetric(metricBody); 289 | 290 | logEntry({ 291 | level: 'error', 292 | namespace: process.env.NAMESPACE, 293 | job: `${servicePrefix}-server`, 294 | endpointLabel: spanTag, 295 | endpoint, 296 | message: `traceID=${traceId} http.method=GET endpoint=${endpoint} status=FAILURE error="${err}"`, 297 | }); 298 | 299 | res.status(500).send(err); 300 | } 301 | }); 302 | 303 | // Generic DELETE endpoint 304 | app.delete('/:endpoint', async (req, res) => { 305 | const endpoint = req.params.endpoint; 306 | const currentSpan = api.trace.getSpan(api.context.active()); 307 | const spanContext = currentSpan.spanContext(); 308 | const traceId = spanContext.traceId; 309 | 310 | let metricBody = { 311 | labels: { 312 | method: 'DELETE', 313 | }, 314 | start: Date.now(), 315 | }; 316 | metricBody.labels[spanTag] = endpoint; 317 | 318 | if (!nameSet.includes(endpoint)) { 319 | res.status(404).send(`${endpoint} is not a valid endpoint`); 320 | metricBody.labels.status = '404'; 321 | responseMetric(metricBody); 322 | return; 323 | } 324 | 325 | if (!req.body || !req.body.name) { 326 | // Here we'd use 'respondToCall()' which would POST a metric for the response 327 | // code 328 | metricBody.labels.status = '400'; 329 | responseMetric(metricBody); 330 | } 331 | 332 | // If we're in the middle of a teardown, don't do anything 333 | if (teardownCheck({ 334 | spanContext, 335 | endpoint, 336 | method: 'DELETE', 337 | res, 338 | }) === true) { 339 | return; 340 | } 341 | 342 | // Delete a manticore name 343 | try { 344 | await databaseAction({ 345 | method: Database.DELETE, 346 | table: endpoint, 347 | name: req.body.name, 348 | }); 349 | 350 | // Metrics 351 | metricBody.labels.status = '204'; 352 | responseMetric(metricBody); 353 | 354 | logEntry({ 355 | level: 'info', 356 | namespace: process.env.NAMESPACE, 357 | job: `${servicePrefix}-server`, 358 | endpointLabel: spanTag, 359 | endpoint, 360 | message: `traceID=${traceId} http.method=DELETE endpoint=${endpoint} status=SUCCESS`, 361 | }); 362 | 363 | res.sendStatus(204); 364 | } catch (err) { 365 | // Metrics 366 | metricBody.labels.status = '500'; 367 | responseMetric(metricBody); 368 | 369 | logEntry({ 370 | level: 'error', 371 | namespace: process.env.NAMESPACE, 372 | job: 
`${servicePrefix}-server`, 373 | endpointLabel: spanTag, 374 | endpoint, 375 | message: `traceID=${traceId} http.method=DELETE endpoint=${endpoint} status=FAILURE error="${err}"`, 376 | }); 377 | 378 | res.status(500).send(err); 379 | } 380 | }); 381 | 382 | // Destroy the DB table and recreate it 383 | const tableWipe = async () => { 384 | const requestSpan = tracer.startSpan('server'); 385 | const { traceId } = requestSpan.spanContext(); 386 | 387 | // Create a new context for this request 388 | await api.context.with(api.trace.setSpan(api.context.active(), requestSpan), async () => { 389 | // You know, there are positives and negatives to using an event thread 390 | // based model. But when it comes to stuff like this, I sure don't miss 391 | // pthread mutices. One variable change. One. 392 | teardownInProgress = true; 393 | 394 | try { 395 | // DROP the table 396 | logEntry({ 397 | level: 'info', 398 | job: `${process.env.NAMESPACE}/${servicePrefix}-server`, 399 | namespace: process.env.NAMESPACE, 400 | message: `traceId=${traceId} message="DROPing tables..."`, 401 | }); 402 | 403 | await databaseAction({ 404 | method: Database.DROP, 405 | }); 406 | 407 | // Recreate the tables for each endpoint 408 | logEntry({ 409 | level: 'info', 410 | job: `${servicePrefix}-server`, 411 | namespace: process.env.NAMESPACE, 412 | message: `traceId=${traceId} message="CREATEing tables..."`, 413 | }); 414 | 415 | await databaseAction({ 416 | method: Database.CREATE, 417 | }); 418 | } catch(err) { 419 | logEntry({ 420 | level: 'info', 421 | job: `${servicePrefix}-server`, 422 | namespace: process.env.NAMESPACE, 423 | message: `traceId=${traceId} error="${err}"`, 424 | }); 425 | } finally { 426 | teardownInProgress = false; 427 | requestSpan.end(); 428 | } 429 | }); 430 | }; 431 | 432 | // Checks to see if there's a teardown in progress 433 | const teardownCheck = (details) => { 434 | const { spanContext, endpoint, method, res } = details; 435 | // If we're in the middle of a teardown, don't do anything. 436 | if (teardownInProgress) { 437 | logEntry({ 438 | level: 'error', 439 | namespace: process.env.NAMESPACE, 440 | job: `${servicePrefix}-server`, 441 | message: `traceID=${spanContext.traceId} http.method=${method} endpoint=${endpoint} status=FAILURE error='Table is not available'`, 442 | }); 443 | res.status(500).send('Table is not available'); 444 | return true; 445 | } 446 | 447 | return false; 448 | }; 449 | 450 | // Create the DB and connect to it 451 | const startServer = async () => { 452 | const requestSpan = tracer.startSpan('server'); 453 | 454 | // Create a new context for this request 455 | await api.context.with(api.trace.setSpan(api.context.active(), requestSpan), async () => { 456 | try { 457 | logEntry({ 458 | level: 'info', 459 | job: `${servicePrefix}-server`, 460 | namespace: process.env.NAMESPACE, 461 | message: 'Installing postgres client...', 462 | }); 463 | pgClient = new Client({ 464 | host: process.env.MYTHICAL_DATABASE_HOST ?? 'mythical-database', 465 | port: Number(process.env.MYTHICAL_DATABASE_HOST_PORT ?? 5432), 466 | user: process.env.MYTHICAL_DATABASE_USER ?? 'postgres', 467 | password: process.env.MYTHICAL_DATABASE_PASSWORD ??
'mythical', 468 | }); 469 | 470 | await pgClient.connect(); 471 | const results = await pgClient.query(`SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname = '${spanTag}';`); 472 | if (Number(results.rows[0].count) === 0) { 473 | logEntry({ 474 | level: 'info', 475 | namespace: process.env.NAMESPACE, 476 | job: `${servicePrefix}-server`, 477 | message: 'Database entry did not exist, creating...', 478 | }); 479 | await pgClient.query(`CREATE DATABASE ${spanTag}`); 480 | } 481 | 482 | logEntry({ 483 | level: 'info', 484 | namespace: process.env.NAMESPACE, 485 | job: `${servicePrefix}-server`, 486 | message: 'Creating tables...', 487 | }); 488 | 489 | // Create the tables. 490 | await databaseAction({ 491 | method: Database.CREATE, 492 | }); 493 | 494 | // Listen to API connections. 495 | app.listen(4000); 496 | 497 | // Schedule a table wipe in the future. 498 | setInterval(() => tableWipe(), teardownTimeout); 499 | 500 | logEntry({ 501 | level: 'info', 502 | namespace: process.env.NAMESPACE, 503 | job: `${servicePrefix}-server`, 504 | message: `${servicePrefix} server up and running...`, 505 | }); 506 | } catch (err) { 507 | pgClient.end(); 508 | logEntry({ 509 | level: 'info', 510 | namespace: process.env.NAMESPACE, 511 | job: `${servicePrefix}-server`, 512 | message: `${servicePrefix} server could not start, trying again in 5 seconds... ${err}`, 513 | }); 514 | setTimeout(() => startServer(), 5000); 515 | } finally { 516 | requestSpan.end(); 517 | } 518 | }); 519 | }; 520 | 521 | // Start up the API server 522 | startServer(); 523 | })(); 524 | -------------------------------------------------------------------------------- /source/mythical-beasts-server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "prometheus-server", 3 | "version": "1.0.1", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "node index.js" 9 | }, 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "@opentelemetry/api": "^1.9.0", 14 | "@opentelemetry/auto-instrumentations-node": "^0.60.1", 15 | "@opentelemetry/core": "^2.0.1", 16 | "@opentelemetry/exporter-trace-otlp-grpc": "^0.202.0", 17 | "@opentelemetry/instrumentation": "^0.202.0", 18 | "@opentelemetry/resources": "^2.0.1", 19 | "@opentelemetry/sdk-trace-base": "^2.0.1", 20 | "@opentelemetry/sdk-trace-node": "^2.0.1", 21 | "@opentelemetry/semantic-conventions": "^1.34.0", 22 | "@pyroscope/nodejs": "^0.4.5", 23 | "axios": "^1.10.0", 24 | "body-parser": "^2.2.0", 25 | "cors": "^2.8.5", 26 | "express": "^5.1.0", 27 | "influx": "^5.10.0", 28 | "pg": "^8.14.0", 29 | "prom-client": "github:voltbit/prom-client#add-openmetrics-support", 30 | "random": "^5.4.0" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /tempo/tempo.yaml: -------------------------------------------------------------------------------- 1 | # For more information on this configuration, see the complete reference guide at 2 | # https://grafana.com/docs/tempo/latest/configuration/ 3 | 4 | # Enables result streaming from Tempo (to Grafana) via HTTP. 5 | stream_over_http_enabled: true 6 | 7 | # Configure the server block. 8 | server: 9 | # Listen for all incoming requests on port 3200. 10 | http_listen_port: 3200 11 | 12 | # The distributor receives incoming trace span data for the system.
13 | distributor: 14 | receivers: # This configuration will listen on all ports and protocols that Tempo is capable of. 15 | jaeger: # The receivers all come from the OpenTelemetry collector. More configuration information can 16 | protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver 17 | thrift_http: # 18 | grpc: # For a production deployment you should only enable the receivers you need! 19 | thrift_binary: # Note that from Tempo 2.7.0, if an endpoint is not specified, it will listen only on localhost. 20 | thrift_compact: 21 | otlp: 22 | protocols: 23 | http: 24 | endpoint: "0.0.0.0:4318" # Listen to OTLP HTTP on port 4318, on all interfaces. 25 | grpc: 26 | endpoint: "0.0.0.0:4317" # This example repository only utilises the OTLP gRPC receiver on port 4317, on all interfaces. 27 | zipkin: # Receive trace data in any supported Zipkin format. 28 | 29 | # The ingester receives data from the distributor and processes it into indices and blocks. 30 | ingester: 31 | trace_idle_period: 10s # The length of time after a trace has not received spans to consider it complete and flush it. 32 | max_block_bytes: 1_000_000 # Cut the head block when it hits this size or 33 | max_block_duration: 5m # this much time passes 34 | 35 | # The compactor block configures the compactor responsible for compacting TSDB blocks. 36 | compactor: 37 | compaction: 38 | compaction_window: 1h # Blocks in this time window will be compacted together. 39 | max_block_bytes: 100_000_000 # Maximum size of a compacted block. 40 | block_retention: 1h # How long to keep blocks. The default is 14 days; this demo system is short-lived. 41 | compacted_block_retention: 10m # How long to keep compacted blocks stored elsewhere. 42 | 43 | # Configuration block to determine where to store TSDB blocks. 44 | storage: 45 | trace: 46 | backend: local # Use the local filesystem for block storage. Not recommended for production systems. 47 | block: 48 | bloom_filter_false_positive: .05 # Bloom filter false positive rate. Lower values create larger filters but fewer false positives. 49 | # Write Ahead Log (WAL) configuration. 50 | wal: 51 | path: /tmp/tempo/wal # Directory to store the WAL locally. 52 | # Local configuration for filesystem storage. 53 | local: 54 | path: /tmp/tempo/blocks # Directory to store the TSDB blocks. 55 | # Pool used for finding trace IDs. 56 | pool: 57 | max_workers: 100 # Worker pool determines the number of parallel requests to the object store backend. 58 | queue_depth: 10000 # Maximum depth for the querier queue jobs. A job is required for each block searched. 59 | 60 | # Configures the metrics generator component of Tempo. 61 | metrics_generator: 62 | # Specifies which processors to use. 63 | processor: 64 | # Span metrics create metrics based on span type, duration, name and service. 65 | span_metrics: 66 | # Configure extra dimensions to add as metric labels. 67 | dimensions: 68 | - http.method 69 | - http.target 70 | - http.status_code 71 | - service.version 72 | # Service graph metrics create node and edge metrics for determining service interactions. 73 | service_graphs: 74 | # Configure extra dimensions to add as metric labels. 75 | dimensions: 76 | - http.method 77 | - http.target 78 | - http.status_code 79 | - service.version 80 | # Configure the local blocks processor. 81 | local_blocks: 82 | # Ensure that metrics blocks are flushed to storage so that TraceQL metrics queries can be run against historical data.
83 | flush_to_storage: true 84 | # The registry configuration determines how to process metrics. 85 | registry: 86 | collection_interval: 5s # Create new metrics every 5s. 87 | # Configure extra labels to be added to metrics. 88 | external_labels: 89 | source: tempo # Add a `{source="tempo"}` label. 90 | group: 'mythical' # Add a `{group="mythical"}` label. 91 | # Configures where the store for metrics is located. 92 | storage: 93 | # WAL for metrics generation. 94 | path: /tmp/tempo/generator/wal 95 | # Where to remote write metrics to. 96 | remote_write: 97 | - url: http://mimir:9009/api/v1/push # URL of locally running Mimir instance. 98 | send_exemplars: true # Send exemplars along with their metrics. 99 | traces_storage: 100 | path: /tmp/tempo/generator/traces 101 | 102 | # Global override configuration. 103 | overrides: 104 | defaults: 105 | metrics_generator: 106 | processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator 107 | generate_native_histograms: both 108 | --------------------------------------------------------------------------------
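For reference, the following is a minimal, illustrative sketch (not part of the repository) of a client exercising the mythical-beasts-server API defined in source/mythical-beasts-server/index.js above. It assumes the server is reachable on localhost:4000 (it calls app.listen(4000)) and that 'unicorn' is one of the valid endpoint names held in nameSet, which is defined earlier in that file; both the host and the endpoint name are assumptions and may need adjusting. axios is already declared as a dependency in the server's package.json.

// client-sketch.js -- illustrative only.
// Assumes the server listens on localhost:4000 and that 'unicorn' is a valid
// entry in nameSet (hypothetical here; check index.js for the real list).
const axios = require('axios');

const BASE_URL = 'http://localhost:4000';
const ENDPOINT = 'unicorn';

(async () => {
    // POST a new name: the server responds 201 on success, 404 for an
    // unknown endpoint, and 500 if the database action fails.
    await axios.post(`${BASE_URL}/${ENDPOINT}`, { name: 'Sparkles' });

    // GET all names stored for the endpoint (200 with the result set).
    const { data } = await axios.get(`${BASE_URL}/${ENDPOINT}`);
    console.log(data);

    // DELETE a name: 204 on success. axios sends a DELETE body via the `data` option.
    await axios.delete(`${BASE_URL}/${ENDPOINT}`, { data: { name: 'Sparkles' } });
})();

Each call also produces the telemetry wired up in the server code above: a response-time histogram observation carrying a traceID/spanID exemplar, a structured log line containing the trace ID, and spans exported over OTLP, which Tempo receives via the distributor configuration in tempo.yaml.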