├── scripts ├── __init__.py ├── modules │ ├── __init__.py │ ├── proxy.py │ └── aux_services.py ├── tests │ ├── __init__.py │ └── config │ │ ├── test_apm_managed_build.yml │ │ ├── test_apm_managed.yml │ │ └── test_start_main_default.yml ├── kibana │ └── validate-ts-interfaces-against-apm-server-sample-docs │ │ ├── .prettierrc.json │ │ ├── README.md │ │ ├── tsconfig.json │ │ ├── scripts │ │ ├── setup.sh │ │ ├── clone-kibana.sh │ │ ├── helpers.ts │ │ └── download-sample-docs.ts │ │ ├── package.json │ │ └── yarn.lock ├── compose.py ├── create-api-key.sh ├── docker-summary.sh └── tls │ ├── kibana │ ├── kibana.crt │ └── kibana.key │ ├── ca │ └── ca.crt │ ├── elasticsearch │ ├── elasticsearch.crt │ └── elasticsearch.key │ └── apm-server │ ├── cert.crt │ └── key.pem ├── .flake8 ├── docker ├── opbeans-loadgen ├── dyno │ ├── __init__.py │ ├── tests │ │ ├── __init__.py │ │ ├── unit │ │ │ ├── __init__.py │ │ │ ├── index.test.js │ │ │ ├── test_docker.py │ │ │ └── test_control.py │ │ ├── conftest.py │ │ └── files │ │ │ └── docker_inspect.yml │ ├── app │ │ ├── static │ │ │ ├── dino.jpg │ │ │ └── favicon.ico │ │ ├── api │ │ │ ├── __init__.py │ │ │ └── docker.py │ │ ├── cfg.py │ │ ├── __init__.py │ │ ├── templates │ │ │ └── index.html │ │ └── range.yml │ ├── entrypoint.sh │ ├── package.json │ ├── requirements.txt │ └── Dockerfile ├── opbeans │ ├── php │ │ └── Dockerfile │ ├── node │ │ ├── sourcemaps │ │ │ └── README.md │ │ ├── Dockerfile │ │ └── entrypoint.sh │ ├── rum │ │ ├── processes.config.js │ │ ├── package.json │ │ ├── Dockerfile │ │ └── tasks.js │ ├── dotnet │ │ ├── NuGet.Config │ │ ├── Dockerfile │ │ └── run.sh │ ├── ruby │ │ ├── Dockerfile │ │ └── entrypoint.sh │ ├── frontend_nginx │ │ ├── Dockerfile │ │ ├── rum-config.template │ │ ├── default.template │ │ └── entrypoint.sh │ ├── python │ │ ├── Dockerfile │ │ └── entrypoint.sh │ ├── java │ │ ├── entrypoint.sh │ │ ├── Dockerfile │ │ └── build-agent.sh │ └── go │ │ └── Dockerfile ├── apm-server │ ├── haproxy │ │ ├── Dockerfile │ │ └── docker-entrypoint.sh │ ├── recorder │ │ ├── go.mod │ │ ├── Dockerfile │ │ └── main.go │ ├── managed │ │ ├── go.mod │ │ ├── go.sum │ │ └── Dockerfile │ ├── teeproxy │ │ └── Dockerfile │ ├── pipelines │ │ ├── default.json │ │ └── opbeans-servicemap.json │ └── Dockerfile ├── kibana_src │ ├── .dockerignore │ ├── kibana_src.yml │ └── Dockerfile ├── statsd │ ├── config.js │ ├── Dockerfile │ ├── LICENSE │ └── statsd-socket.io.js ├── toxi │ └── README.md ├── elasticsearch │ ├── service_tokens │ ├── users_roles │ ├── users │ └── roles.yml ├── tests │ ├── test_helpers.bash │ └── tests.bats ├── kibana │ ├── kibana.yml │ └── kibana-8.yml ├── filebeat │ ├── filebeat.simple.yml │ ├── filebeat.6.x-compat.yml │ └── filebeat.yml ├── intake-receiver │ └── Dockerfile ├── logstash │ ├── pipeline │ │ └── apm.conf │ └── pipeline-6.x-compat │ │ └── apm.conf ├── packetbeat │ ├── packetbeat.6.x-compat.yml │ └── packetbeat.yml ├── heartbeat │ └── heartbeat.yml ├── elastic-agent │ └── Dockerfile ├── Makefile └── metricbeat │ ├── metricbeat.yml │ └── metricbeat.6.x-compat.yml ├── images ├── dyno.png └── apm-dyno.png ├── .ci ├── scripts │ ├── yamllint │ ├── lint.sh │ ├── build-docker-all.sh │ ├── unit-tests.sh │ ├── shellcheck │ └── common.sh ├── .yamlint.yml ├── bump-go-release-version.sh ├── bump-elastic-stack-snapshot.yml ├── bump-stack-release-version.sh ├── packer_cache.sh └── bump-elastic-stack.yml ├── .backportrc.json ├── pytest.ini ├── .dockerignore ├── .gitmodules ├── .github ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── 
opentelemetry.yml │ ├── bump-elastic-stack.yml │ ├── bump-elastic-stack-snapshot.yml │ ├── test-reporter.yml │ └── ci.yml ├── Dockerfile ├── .gitignore ├── .editorconfig ├── .pre-commit-config.yaml ├── requirements.txt ├── .mergify.yml ├── Makefile └── QUICKSTART.md /scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/modules/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | -------------------------------------------------------------------------------- /docker/opbeans-loadgen: -------------------------------------------------------------------------------- 1 | ../../opbeans-loadgen/ -------------------------------------------------------------------------------- /docker/dyno/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /docker/dyno/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /docker/opbeans/php/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM opbeans/opbeans-php 2 | -------------------------------------------------------------------------------- /docker/dyno/tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /docker/apm-server/haproxy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM haproxy:1.9 2 | 3 | COPY docker-entrypoint.sh / 4 | -------------------------------------------------------------------------------- /images/dyno.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/apm-integration-testing/main/images/dyno.png -------------------------------------------------------------------------------- /images/apm-dyno.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/apm-integration-testing/main/images/apm-dyno.png -------------------------------------------------------------------------------- /docker/kibana_src/.dockerignore: -------------------------------------------------------------------------------- 1 | **/.git 2 | **/.ci 3 | **/.github 4 | **/node_modules 5 | **/kibana/target 6 | -------------------------------------------------------------------------------- /docker/opbeans/node/sourcemaps/README.md: -------------------------------------------------------------------------------- 1 | During startup, the opbeans-node container will copy sourcemaps into this folder. 
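A minimal sketch of that copy step, mirroring docker/opbeans/node/entrypoint.sh further down in this dump (the glob, target directory and permissions below are taken from that script):

```sh
if [ -f /sourcemaps/README.md ]; then
  rm -f /sourcemaps/*.map
  cp -f ./client/build/static/js/*.map /sourcemaps/
  chmod 0666 /sourcemaps/*.map
fi
```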
2 | -------------------------------------------------------------------------------- /docker/apm-server/recorder/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/elastic/apm-integration-testing/docker/apm-server/recorder 2 | 3 | go 1.12 4 | -------------------------------------------------------------------------------- /docker/dyno/app/static/dino.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/apm-integration-testing/main/docker/dyno/app/static/dino.jpg -------------------------------------------------------------------------------- /docker/dyno/app/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/apm-integration-testing/main/docker/dyno/app/static/favicon.ico -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "semi": true 4 | } 5 | -------------------------------------------------------------------------------- /docker/apm-server/managed/go.mod: -------------------------------------------------------------------------------- 1 | module managed 2 | 3 | go 1.15 4 | 5 | require github.com/pkg/errors v0.9.2-0.20201214064552-5dd12d0cfe7f 6 | -------------------------------------------------------------------------------- /docker/dyno/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Starting server..." 3 | gunicorn -b 0.0.0.0 dyno.app:app --capture-output -t 90 -w 8 --reload 4 | -------------------------------------------------------------------------------- /docker/dyno/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "jest": "^26.6.3" 4 | }, 5 | "scripts": { 6 | "test": "jest" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /docker/dyno/tests/unit/index.test.js: -------------------------------------------------------------------------------- 1 | const sum = require('./sum'); 2 | 3 | test('adds 1 + 2 to equal 3', () => { 4 | expect(sum(1, 2)).toBe(3); 5 | }); 6 | -------------------------------------------------------------------------------- /docker/statsd/config.js: -------------------------------------------------------------------------------- 1 | { 2 | socketPort: 8127, 3 | backends: ['./statsd-socket.io'], 4 | config: true, 5 | flushInterval: 5000 6 | } 7 | 8 | -------------------------------------------------------------------------------- /.ci/scripts/yamllint: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | docker run --rm -t -v "$PWD:/yaml" -u "$(id -u):$(id -g)" docker.elastic.co/observability-ci/yamllint "$@" 4 | -------------------------------------------------------------------------------- /docker/apm-server/recorder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang AS build 2 | 3 | WORKDIR /app 4 | COPY main.go /app 5 | RUN go build -o /usr/local/bin/apm-server . 
6 | 7 | CMD apm-server 8 | -------------------------------------------------------------------------------- /docker/dyno/requirements.txt: -------------------------------------------------------------------------------- 1 | gunicorn 2 | docker 3 | pyyaml 4 | flask 5 | flask-cors 6 | Flask-Limiter 7 | Flask-Caching 8 | git+https://github.com/cachedout/toxiproxy-python.git 9 | -------------------------------------------------------------------------------- /docker/dyno/app/api/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from flask import Blueprint 3 | 4 | bp = Blueprint('api', __name__) 5 | 6 | from dyno.app.api import control # noqa E402 7 | -------------------------------------------------------------------------------- /docker/opbeans/rum/processes.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | apps : [{ 3 | name: "worker", 4 | script: "./tasks.js", 5 | instances: 1, 6 | }] 7 | }; 8 | -------------------------------------------------------------------------------- /.backportrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "upstream": "elastic/apm-integration-testing", 3 | "branches": [{ "name": "7.x", "checked": true }, { "name": "6.x", "checked": true } ], 4 | "labels": ["backport"] 5 | } 6 | -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/README.md: -------------------------------------------------------------------------------- 1 | **Install dependencies** 2 | ``` 3 | yarn 4 | ``` 5 | 6 | **Run tests** 7 | ``` 8 | yarn setup && yarn lint 9 | ``` 10 | -------------------------------------------------------------------------------- /docker/apm-server/teeproxy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang 2 | ENV CGO_ENABLED=0 3 | # use fork to quickly pin https://github.com/chrislusf/teeproxy 4 | RUN go install github.com/graphaelli/teeproxy@latest 5 | CMD ["teeproxy"] 6 | -------------------------------------------------------------------------------- /docker/opbeans/dotnet/NuGet.Config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /docker/kibana_src/kibana_src.yml: -------------------------------------------------------------------------------- 1 | elasticsearch.hosts: ["http://elasticsearch:9200"] 2 | elasticsearch.username: "kibana_system_user" 3 | elasticsearch.password: "changeme" 4 | server.host: "0.0.0.0" 5 | server.port: 5601 6 | -------------------------------------------------------------------------------- /.ci/.yamlint.yml: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | # 120 chars should be enough, but don't fail if a line is longer 5 | line-length: 6 | max: 120 7 | level: warning 8 | 9 | indentation: 10 | level: warning 11 | -------------------------------------------------------------------------------- /scripts/tests/config/test_apm_managed_build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | args: 3 | ELASTIC_AGENT_BRANCH_OR_COMMIT: foo 4 | ELASTIC_AGENT_REPO: https://github.com/elastic/apm-server.git 5 | STACK_VERSION: 8.2.0-SNAPSHOT 6 | 
context: docker/elastic-agent 7 | -------------------------------------------------------------------------------- /docker/dyno/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | 3 | LABEL maintainer="mike.place@elastic.co" 4 | WORKDIR / 5 | COPY requirements.txt /dyno/requirements.txt 6 | RUN pip3 install -r dyno/requirements.txt 7 | COPY . /dyno 8 | ENTRYPOINT ["./dyno/entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /docker/apm-server/managed/go.sum: -------------------------------------------------------------------------------- 1 | github.com/pkg/errors v0.9.2-0.20201214064552-5dd12d0cfe7f h1:lJqhwddJVYAkyp72a4pwzMClI20xTwL7miDdm2W/KBM= 2 | github.com/pkg/errors v0.9.2-0.20201214064552-5dd12d0cfe7f/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 3 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | junit_family=xunit1 3 | markers = 4 | upgradetest: mark a test as an upgrade test. disabled by default, use --run-upgrade to enable. 5 | version: mark a test for a specific version. 6 | dyno: mark a test for the Dyno featureset. 7 | -------------------------------------------------------------------------------- /docker/opbeans/ruby/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG OPBEANS_RUBY_IMAGE=opbeans/opbeans-ruby 2 | ARG OPBEANS_RUBY_VERSION=latest 3 | FROM ${OPBEANS_RUBY_IMAGE}:${OPBEANS_RUBY_VERSION} 4 | 5 | COPY entrypoint.sh /app/entrypoint.sh 6 | 7 | CMD ["bin/boot"] 8 | ENTRYPOINT ["/app/entrypoint.sh"] 9 | -------------------------------------------------------------------------------- /docker/toxi/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This directory intentionally left empty. It is used for dynamically generated files which are produced when the compose script is run with the `--dyno` flag. 4 | 5 | Specific exemptions for file(s) in this directory exist in `.gitignore`. 
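A hedged invocation sketch: the `start` subcommand and the stack version below are assumptions about how scripts/compose.py is driven, but `--dyno` is the flag referenced above.

```sh
./scripts/compose.py start 8.2.0 --dyno
```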
6 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | **/*.pyc 2 | **/.images 3 | **/__pycache__ 4 | **/log 5 | **/tmp 6 | *.py[cod] 7 | *.pyc 8 | .cache 9 | .ci/ 10 | .coverage 11 | .git 12 | .github 13 | .pytest_cache 14 | /.idea 15 | apm-server 16 | docker-compose.yml 17 | docker/ 18 | htmlcov/ 19 | venv/ 20 | events/ 21 | -------------------------------------------------------------------------------- /docker/opbeans/rum/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "opbeans-frontend-loadgen", 3 | "private": true, 4 | "version": "1.0.0", 5 | "description": "Load generator for opbeans frontend app", 6 | "dependencies": { 7 | "pm2": "^5.2.0", 8 | "puppeteer": "^15.0.0" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "strict": true, 5 | "resolveJsonModule": true, 6 | "esModuleInterop": true, 7 | "moduleResolution": "node" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /docker/statsd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM statsd/statsd:v0.9.0 2 | RUN npm install socket.io@2.3.0 3 | COPY config.js /usr/src/app/config.js 4 | COPY statsd-socket.io.js /usr/src/app/node_modules/statsd-socket.io/lib/statsd-socket.io.js 5 | COPY statsd-socket.io.js /usr/src/app/statsd-socket.io.js 6 | EXPOSE 8127 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "docker/tests/test_helper/bats-assert"] 2 | path = docker/tests/test_helper/bats-assert 3 | url = https://github.com/ztombol/bats-assert 4 | [submodule "docker/tests/test_helper/bats-support"] 5 | path = docker/tests/test_helper/bats-support 6 | url = https://github.com/ztombol/bats-support 7 | -------------------------------------------------------------------------------- /docker/apm-server/managed/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:latest 2 | ENV GO111MODULE=on 3 | WORKDIR /src/apmpkg 4 | COPY . /src/apmpkg 5 | RUN CGO_ENABLED=0 go build -o /apmpkg 6 | 7 | FROM alpine 8 | RUN apk --no-cache add ca-certificates --update curl 9 | COPY --from=0 /apmpkg /apmpkg 10 | 11 | 12 | CMD ["/apmpkg"] 13 | -------------------------------------------------------------------------------- /.ci/scripts/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # for details about how it works see https://github.com/elastic/apm-integration-testing#continuous-integration 3 | 4 | srcdir=$(dirname "$0") 5 | test -z "$srcdir" && srcdir=. 6 | # shellcheck disable=SC1090,SC1091 7 | . 
"${srcdir}/common.sh" 8 | 9 | prepareAndRunGoals lint 10 | -------------------------------------------------------------------------------- /.ci/scripts/build-docker-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # for details about how it works see https://github.com/elastic/apm-integration-testing#continuous-integration 3 | 4 | srcdir=$(dirname "$0") 5 | test -z "$srcdir" && srcdir=. 6 | # shellcheck disable=SC1090,SC1091 7 | . "${srcdir}/common.sh" 8 | 9 | prepareAndRunAll build-env 10 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## What does this PR do? 2 | 3 | 6 | 7 | ## Why is it important? 8 | 9 | 12 | 13 | ## Related issues 14 | Closes #ISSUE 15 | -------------------------------------------------------------------------------- /docker/opbeans/node/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG OPBEANS_NODE_IMAGE=opbeans/opbeans-node 2 | ARG OPBEANS_NODE_VERSION=latest 3 | FROM ${OPBEANS_NODE_IMAGE}:${OPBEANS_NODE_VERSION} 4 | 5 | RUN apk --no-cache add rsync git 6 | COPY entrypoint.sh /app/entrypoint.sh 7 | 8 | CMD ["pm2-runtime", "ecosystem-workload.config.js"] 9 | ENTRYPOINT ["/app/entrypoint.sh"] 10 | -------------------------------------------------------------------------------- /.ci/scripts/unit-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # for details about how it works see https://github.com/elastic/apm-integration-testing#continuous-integration 3 | 4 | srcdir=$(dirname "$0") 5 | test -z "$srcdir" && srcdir=. 6 | # shellcheck disable=SC1090,SC1091 7 | . 
"${srcdir}/common.sh" 8 | 9 | prepareAndRunGoals env-server test-compose test-helps 10 | -------------------------------------------------------------------------------- /docker/elasticsearch/service_tokens: -------------------------------------------------------------------------------- 1 | elastic/fleet-server/elastic-package-fleet-server-token:{PBKDF2_STRETCH}10000$PNiVyY96dHwRfoDszBvYPAz+mSLbC+NhtPh63dblDZU=$dAY1tXX1U5rXB+2Lt7m0L2LUNSb1q5nRaIqPNZTBxb8= 2 | elastic/kibana/elastic-package-kibana-token:{PBKDF2_STRETCH}10000$wIEFHIIIZ2ap0D0iQsyw0MfB7YuFA1bHnXAmlCoL4Gg=$YxvIJnasjLZyDQZpmFBiJHdR/CGXd5BnVm013Jty6p0= 3 | -------------------------------------------------------------------------------- /docker/opbeans/frontend_nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.17.3 2 | 3 | COPY --from=opbeans/opbeans-frontend:latest /app/build /usr/share/nginx/html 4 | 5 | COPY default.template /etc/nginx/conf.d/default.template 6 | COPY rum-config.template /usr/share/nginx/html/rum-config.template 7 | COPY entrypoint.sh / 8 | 9 | ENTRYPOINT ["/bin/bash", "/entrypoint.sh"] 10 | -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/scripts/setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | rm -rf tmp 4 | mkdir tmp 5 | mkdir tmp/apm-server-docs 6 | 7 | # download apm server samples 8 | yarn ts-node ./scripts/download-sample-docs.ts "$1" "$2" 9 | 10 | # Clone kibana and copy ts interfaces 11 | ./scripts/clone-kibana.sh "$3" "$4" 12 | -------------------------------------------------------------------------------- /docker/tests/test_helpers.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | # check dependencies 4 | ( 5 | type docker &>/dev/null || ( echo "docker is not available"; exit 1 ) 6 | type curl &>/dev/null || ( echo "curl is not available"; exit 1 ) 7 | )>&2 8 | 9 | function cleanup { 10 | docker kill "$1" &>/dev/null ||: 11 | docker rm -fv "$1" &>/dev/null ||: 12 | } 13 | -------------------------------------------------------------------------------- /.ci/scripts/shellcheck: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # .ci/scripts is added to $PATH by apm-pipeline-library 5 | # so this script will be invoked by 6 | # https://github.com/detailyang/pre-commit-shell, which 7 | # is configured in .pre-commit-config.yaml. 
8 | 9 | docker run --rm -t -v "$PWD:/mnt" -u "$(id -u):$(id -g)" docker.elastic.co/observability-ci/shellcheck "$@" 10 | -------------------------------------------------------------------------------- /docker/kibana/kibana.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #server.name: kibana 3 | server.host: "0" 4 | #elasticsearch.hosts: [ "http://elasticsearch:9200" ] 5 | monitoring.ui.container.elasticsearch.enabled: true 6 | 7 | elastic.apm.active: true 8 | elastic.apm.serverUrl: "http://apm-server:8200" 9 | xpack.task_manager.monitored_stats_health_verbose_log.enabled: true 10 | 11 | logging: 12 | loggers: 13 | - context: plugins.taskManager 14 | appenders: [console] 15 | level: debug 16 | -------------------------------------------------------------------------------- /docker/elasticsearch/users_roles: -------------------------------------------------------------------------------- 1 | apm_server:apm_server_user 2 | apm_system:apm_server_user 3 | apm_user:apm_server_user,apm_user_ro 4 | beats:beats_user 5 | beats_system:beats_user,filebeat_user,heartbeat_user,metricbeat_user 6 | filebeat:filebeat_user 7 | heartbeat:heartbeat_user 8 | ingest_admin:apm_server_user 9 | kibana_system:kibana_system_user 10 | kibana_user:apm_server_user,apm_user_ro,beats_user,filebeat_user,heartbeat_user,metricbeat_user,opbeans_user 11 | metricbeat:metricbeat_user 12 | opbeans:opbeans_user 13 | superuser:admin 14 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:12-buster as BUILD_IMAGE 2 | WORKDIR /app 3 | RUN npm install elasticdump 4 | 5 | FROM python:3.7-buster 6 | COPY requirements.txt requirements.txt 7 | RUN pip install -q -r requirements.txt 8 | 9 | RUN useradd -U -m -s /bin/bash -d /app tester 10 | COPY . /app 11 | WORKDIR /app 12 | COPY --from=BUILD_IMAGE /app . 
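# The two symlinks below expose the elasticdump CLIs that the build stage
# installed under /app/node_modules. A hedged sketch of running one of them
# from the built image (the image tag and index URL are illustrative):
#   docker run --rm apm-integration-testing \
#     elasticdump --input=http://elasticsearch:9200/apm-test --output=/dev/stdout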
13 | RUN ln -s /app/node_modules/elasticdump/bin/elasticdump /usr/local/bin/elasticdump 14 | RUN ln -s /app/node_modules/elasticdump/bin/multielasticdump /usr/local/bin/multielasticdump 15 | COPY --from=BUILD_IMAGE /usr/local/bin/node /usr/local/bin/node 16 | USER tester 17 | -------------------------------------------------------------------------------- /.github/workflows/opentelemetry.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: OpenTelemetry Export Trace 3 | 4 | on: 5 | workflow_run: 6 | workflows: 7 | - bump-elastic-stack 8 | - bump-elastic-stack-snapshot 9 | - ci 10 | - test-reporter 11 | types: [completed] 12 | 13 | jobs: 14 | otel-export-trace: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: elastic/apm-pipeline-library/.github/actions/opentelemetry@current 18 | with: 19 | vaultUrl: ${{ secrets.VAULT_ADDR }} 20 | vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} 21 | vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} 22 | -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "license": "Apache-2.0", 3 | "scripts": { 4 | "lint": "tsc ./tmp/apm-server-docs/*.ts --noEmit", 5 | "setup": "./scripts/setup.sh", 6 | "clone-kibana": "./scripts/clone-kibana.sh", 7 | "download-sample-docs": "./scripts/download-sample-docs.ts" 8 | }, 9 | "dependencies": { 10 | "axios": "^0.21.1", 11 | "prettier": "^1.16.4", 12 | "ts-node": "^8.0.2", 13 | "typescript": "3.7.4" 14 | }, 15 | "devDependencies": { 16 | "@types/node": "^11.9.6", 17 | "@types/prettier": "^1.16.1" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /docker/filebeat/filebeat.simple.yml: -------------------------------------------------------------------------------- 1 | --- 2 | setup.template.settings: 3 | index.number_of_shards: 1 4 | index.codec: best_compression 5 | index.number_of_replicas: 0 6 | 7 | setup.kibana: 8 | host: "${KIBANA_HOST:kibana:5601}" 9 | 10 | output.elasticsearch: 11 | hosts: ["${ELASTICSEARCH_HOSTS:elasticsearch:9200}"] 12 | 13 | logging.json: true 14 | logging.metrics.enabled: false 15 | 16 | filebeat.prospectors: 17 | - type: log 18 | paths: 19 | - '/var/lib/docker/containers/*/*.log' 20 | json.message_key: log 21 | json.overwrite_keys: true 22 | json.keys_under_root: false 23 | json.add_error_key: true 24 | -------------------------------------------------------------------------------- /.github/workflows/bump-elastic-stack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: bump-elastic-stack 3 | 4 | on: 5 | workflow_dispatch: 6 | schedule: 7 | - cron: '0 15 * * 1-5' 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | bump: 14 | runs-on: ubuntu-latest 15 | steps: 16 | 17 | - uses: actions/checkout@v3 18 | 19 | - uses: elastic/apm-pipeline-library/.github/actions/updatecli@current 20 | with: 21 | vaultUrl: ${{ secrets.VAULT_ADDR }} 22 | vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} 23 | vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} 24 | pipeline: ./.ci/bump-elastic-stack.yml 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | **/*.pyc 3 | .cache 4 | **/__pycache__ 5 | **/.idea 6 | **/target 7 | *.iml 8 | **/tmp 9 | 
**/log 10 | **/.images 11 | .pytest_cache/ 12 | venv*/ 13 | .coverage 14 | docker-compose.yml 15 | docker/toxi/toxi.cfg 16 | htmlcov/ 17 | tests/results/ 18 | docker/opbeans/node/sourcemaps/*.map 19 | .DS_Store 20 | node_modules 21 | **/.vscode/settings.json 22 | **/obj 23 | **/bin 24 | docker-info/ 25 | .vscode/ 26 | *.swp 27 | *.bck 28 | 29 | # For the BATS testing 30 | bats-core/ 31 | target/ 32 | .tags* 33 | 34 | # For the docs generation 35 | .apm-its 36 | 37 | # For the cache java dependencies 38 | docker/java/spring/.m2 39 | 40 | events/ 41 | -------------------------------------------------------------------------------- /docker/opbeans/frontend_nginx/rum-config.template: -------------------------------------------------------------------------------- 1 | // Disable random failures 2 | // see https://github.com/elastic/opbeans-frontend/blob/472f914f5529d64ccf4aad0fc4a76ec27fa0a135/src/components/ProductDetail/index.js#L9 3 | var _mathRamdom = Math.random; 4 | Math.random = function() { 5 | return Math.abs(_mathRamdom() - 0.3); 6 | }; 7 | 8 | // see https://github.com/elastic/opbeans-frontend/blob/849a7a7/src/rum.js#L40-L55 9 | var rumConfig = { 10 | serverUrl: '{{ ELASTIC_APM_JS_BASE_SERVER_URL }}', 11 | serviceName: '{{ ELASTIC_APM_JS_BASE_SERVICE_NAME }}', 12 | serviceVersion: '{{ ELASTIC_APM_JS_BASE_SERVICE_VERSION }}', 13 | {{ EXTRA_OPTS }} 14 | } 15 | -------------------------------------------------------------------------------- /docker/opbeans/python/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG OPBEANS_PYTHON_IMAGE=opbeans/opbeans-python 2 | ARG OPBEANS_PYTHON_VERSION=latest 3 | FROM ${OPBEANS_PYTHON_IMAGE}:${OPBEANS_PYTHON_VERSION} 4 | ENV ELASTIC_APM_ENABLE_LOG_CORRELATION=true 5 | 6 | # postgresql-client is used for the dbshell command in the entrypoint 7 | RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries 8 | RUN apt-get -qq update \ 9 | && apt-get -qq install -y \ 10 | postgresql-client \ 11 | --no-install-recommends \ 12 | && rm -rf /var/lib/apt/lists/* 13 | 14 | COPY entrypoint.sh /app/ 15 | 16 | CMD ["honcho", "start", "--no-prefix"] 17 | ENTRYPOINT ["/bin/bash", "/app/entrypoint.sh"] 18 | -------------------------------------------------------------------------------- /.github/workflows/bump-elastic-stack-snapshot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: bump-elastic-stack-snapshot 3 | 4 | on: 5 | workflow_dispatch: 6 | schedule: 7 | - cron: '0 15 * * 1-5' 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | bump: 14 | runs-on: ubuntu-latest 15 | steps: 16 | 17 | - uses: actions/checkout@v3 18 | 19 | - uses: elastic/apm-pipeline-library/.github/actions/updatecli@current 20 | with: 21 | vaultUrl: ${{ secrets.VAULT_ADDR }} 22 | vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} 23 | vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} 24 | pipeline: ./.ci/bump-elastic-stack-snapshot.yml 25 | -------------------------------------------------------------------------------- /docker/intake-receiver/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG go_version=1.17.7 2 | FROM golang:${go_version} AS build 3 | ENV CGO_ENABLED=0 4 | # TODO(marclop) After https://github.com/elastic/apm-server/pull/7416 is merged, replace git clone 5 | # with 'go install https://github.com/elastic/apm-server/cmd/intake-receiver@latest'. 
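# The receiver listens on port 8200 (see CMD below) and accepts the standard
# APM intake protocol; a hedged smoke test against a running container, where
# the ndjson payload file is hypothetical:
#   curl -i -H 'Content-Type: application/x-ndjson' \
#     --data-binary @events.ndjson http://localhost:8200/intake/v2/events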
6 | RUN git clone --single-branch --branch f-add-intake-receiver https://github.com/marclop/apm-server /apm-server 7 | RUN cd /apm-server/cmd/intake-receiver && go build . 8 | 9 | FROM alpine 10 | COPY --from=build /apm-server/cmd/intake-receiver/intake-receiver /intake-receiver 11 | RUN apk update && apk add curl jq 12 | ENTRYPOINT [ "/intake-receiver" ] 13 | 14 | CMD [ "-host=0.0.0.0:8200" ] 15 | -------------------------------------------------------------------------------- /docker/elasticsearch/users: -------------------------------------------------------------------------------- 1 | admin:$2a$10$xiY0ZzOKmDDN1p3if4t4muUBwh2.bFHADoMRAWQgSClm4ZJ4132Y. 2 | apm_server_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG 3 | apm_user_ro:$2a$10$hQfy2o2u33SapUClsx8NCuRMpQyHP9b2l4t3QqrBA.5xXN2S.nT4u 4 | beats_user:$2a$10$LRpKi4/Q3Qo4oIbiu26rH.FNIL4aOH4aj2Kwi58FkMo1z9FgJONn2 5 | filebeat_user:$2a$10$sFxIEX8tKyOYgsbJLbUhTup76ssvSD3L4T0H6Raaxg4ewuNr.lUFC 6 | heartbeat_user:$2a$10$nKUGDr/V5ClfliglJhfy8.oEkjrDtklGQfhd9r9NoFqQeoNxr7uUK 7 | kibana_system_user:$2a$10$nN6sRtQl2KX9Gn8kV/.NpOLSk6Jwn8TehEDnZ7aaAgzyl/dy5PYzW 8 | metricbeat_user:$2a$10$5PyTd121U2ZXnFk9NyqxPuLxdptKbB8nK5egt6M5/4xrKUkk.GReG 9 | opbeans_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG 10 | -------------------------------------------------------------------------------- /.github/workflows/test-reporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Workflow to process the JUnit test results and add a report to the checks. 3 | name: test-reporter 4 | 5 | on: 6 | workflow_run: 7 | workflows: [ci] 8 | types: [completed] 9 | 10 | jobs: 11 | report: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: elastic/apm-pipeline-library/.github/actions/test-report@gh_actions 15 | with: 16 | artifact: test-results # artifact name 17 | name: JUnit Tests # Name of the check run which will be created 18 | path: "**/*.xml" # Path to test results (inside artifact .zip) 19 | reporter: java-junit # Format of test results 20 | -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/scripts/clone-kibana.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | OWNER=${1:-elastic} 5 | BRANCH=${2:-main} 6 | 7 | echo "Cloning Kibana: ${OWNER}:${BRANCH}" 8 | 9 | cd ./tmp 10 | git clone --quiet --depth 1 -b "${BRANCH}" "https://github.com/${OWNER}/kibana.git" 11 | 12 | ### In 7.7 files moved around. 13 | ### The below section keeps backward compatibility. 
14 | oldLocation=./kibana/x-pack/legacy/plugins/apm/typings/es_schemas 15 | newLocation=./kibana/x-pack/plugins/apm/typings/es_schemas 16 | location=${oldLocation} 17 | if [ -d "${newLocation}" ] ; then 18 | location=${newLocation} 19 | fi 20 | mv "${location}" ./apm-ui-interfaces 21 | rm -rf kibana 22 | -------------------------------------------------------------------------------- /docker/opbeans/frontend_nginx/default.template: -------------------------------------------------------------------------------- 1 | server { 2 | listen 3000; 3 | server_name localhost; 4 | 5 | rewrite ^/dashboard(.*)$ /; 6 | rewrite ^/products(.*)$ /; 7 | rewrite ^/orders(.*)$ /; 8 | rewrite ^/customers(.*)$ /; 9 | 10 | location /api/ { 11 | proxy_pass {{ ELASTIC_OPBEANS_API_SERVER }}; 12 | proxy_set_header Host $host; 13 | proxy_set_header X-Real-IP $remote_addr; 14 | } 15 | 16 | location / { 17 | root /usr/share/nginx/html; 18 | index index.html index.htm; 19 | } 20 | 21 | error_page 500 502 503 504 /50x.html; 22 | location = /50x.html { 23 | root /usr/share/nginx/html; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /docker/apm-server/haproxy/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | backends=${APM_SERVER_COUNT:-1} 6 | 7 | config=/usr/local/etc/haproxy/haproxy.cfg 8 | 9 | # generate configuration file 10 | cat > $config <<EOF [... heredoc body lost in extraction; the script later appends per-backend entries with a second heredoc, cat >> $config <<EOF ... EOF, whose content was also lost ...] -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/helpers.ts: -------------------------------------------------------------------------------- 1 | // [opening comment garbled in extraction] 2 | // [opening comment garbled in extraction] 3 | // 4 | // Converts: 5 | // 6 | // interface User { 7 | // name: string; 8 | // age: number; 9 | // address: { 10 | // street: string; 11 | // city: string; 12 | // }; 13 | // } 14 | // 15 | // To this: 16 | // 17 | // type UserWithUnknown = { 18 | // name: string; 19 | // age: number; 20 | // [key: string]: unknown; 21 | // address: { 22 | // street: string; 23 | // city: string; 24 | // [key: string]: unknown; 25 | // }; 26 | // }; 27 | export type AllowUnknownProperties<T> = T extends object 28 | ? T extends Array<infer U> ? Array<AllowUnknownProperties<U>> : 29 | { [P in keyof T]: AllowUnknownProperties<T[P]> } & { 30 | [key: string]: unknown; 31 | } 32 | : T; 33 | -------------------------------------------------------------------------------- /scripts/create-api-key.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # shellcheck disable=SC2034 5 | privilege=$(curl -s -u admin:changeme -X PUT "localhost:9200/_security/privilege" -H 'Content-Type: application/json' -d' 6 | { 7 | "apm": { 8 | "write_sourcemap": { 9 | "actions": [ "sourcemap:write" ] 10 | }, 11 | "write_event": { 12 | "actions": [ "event:write" ] 13 | }, 14 | "read_agent_config": { 15 | "actions": [ "config_agent:read" ] 16 | } 17 | } 18 | } 19 | ') 20 | 21 | apiKey=$(curl -s -u admin:changeme "localhost:9200/_security/api_key" -H 'Content-Type: application/json' -d' 22 | { 23 | "name": "apm-backend", 24 | "role_descriptors": { 25 | "apm-backend": { 26 | "applications": [ 27 | { 28 | "application": "apm", 29 | "privileges": ["*"], 30 | "resources": ["*"] 31 | } 32 | ] 33 | } 34 | } 35 | } 36 | ' | jq '(.id + ":" +.api_key)') 37 | 38 | echo -n "${apiKey}" | base64 39 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # editorconfig.org 2 | root = true 3 | 4 | [*] 5 | indent_style = space 6 | indent_size = 2 7 | end_of_line = lf 8 | charset = utf-8 9 | trim_trailing_whitespace = true 10 | insert_final_newline = true 11 | 12 | [Jenkinsfile*] 13 | indent_style = space 14 | indent_size = 2 15 | end_of_line = lf 16 | charset = utf-8 17 | trim_trailing_whitespace = true 18 | insert_final_newline = true 19 | 20 | [*.groovy] 21 | indent_style = space 22 | indent_size = 2 23 | end_of_line = lf 24 | charset = utf-8 25 | trim_trailing_whitespace = true 26 | insert_final_newline = true 27 | 28 | [*.py] 29 | indent_style = space 30 | indent_size = 4 31 | end_of_line = lf 32 | charset = utf-8 33 | trim_trailing_whitespace = true 34 | insert_final_newline = true 35 | 36 | [*.sh] 37 | indent_style = space 38 | indent_size = 2 39 | end_of_line = lf 40 | charset = utf-8 41 | trim_trailing_whitespace = true 42 | insert_final_newline = true 43 | 44 | [{Makefile,**.mk}] 45 | # Use tabs for indentation (Makefiles require tabs) 46 | indent_style = tab 47 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | lint: 13 | name: Run linting 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | 19 | - uses: actions/setup-python@v4 20 | with: 21 | python-version: 3.9 22 | 23 | - name: Lint 24 | run: .ci/scripts/lint.sh 25 | 26 | pre-commit: 27 | name: Run pre-commit 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: elastic/apm-pipeline-library/.github/actions/pre-commit@gh_actions 31 | 32 | test: 33 | name: Run tests 34 | runs-on: ubuntu-latest 35 | needs: [lint] 36 | 37 | steps: 38 | - uses: actions/checkout@v3 39 | 40 | - name: test 41 | run: .ci/scripts/unit-tests.sh 42 | 43 | - uses: actions/upload-artifact@v3 44 | if: success() || failure() 45 | with: 46 | name: test-results 47 | path: tests/results/*junit.xml 48 | 
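scripts/create-api-key.sh above prints a base64-encoded `id:api_key` pair. A hedged sketch of consuming it, using the documented `ApiKey` authorization scheme (the apm-server URL is illustrative):

```sh
API_KEY=$(./scripts/create-api-key.sh)
curl -s -H "Authorization: ApiKey ${API_KEY}" http://localhost:8200/
```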
-------------------------------------------------------------------------------- /docker/dyno/app/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | from flask import Flask, render_template, send_from_directory 4 | from flask_cors import CORS 5 | 6 | from dyno.app.cfg import Config as Cfg 7 | 8 | 9 | def create_app(config_class=Cfg): 10 | app = Flask(__name__) 11 | app.config.from_object(config_class) 12 | CORS(app) 13 | from dyno.app.api import bp as api_bp # noqa E402 14 | from dyno.app.api.docker import bp as api_docker # noqa E402 15 | app.register_blueprint(api_bp, url_prefix='/api') 16 | app.register_blueprint(api_docker, url_prefix='/api/docker') 17 | return app 18 | 19 | 20 | app = create_app() 21 | 22 | 23 | 24 | @app.route('/') 25 | def index(): 26 | return render_template("index.html") 27 | 28 | 29 | @app.route('/scratch') 30 | def scratch(): 31 | return render_template("scratch.html") 32 | 33 | 34 | @app.route('/favicon.ico') 35 | def favicon(): 36 | return send_from_directory(os.path.join(app.root_path, 'static'), 37 | 'favicon.ico', mimetype='image/vnd.microsoft.icon') 38 | -------------------------------------------------------------------------------- /scripts/docker-summary.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | DOCKER_IDS=$(docker ps -aq) 5 | 6 | mkdir -p docker-info 7 | 8 | for id in ${DOCKER_IDS} 9 | do 10 | echo "***********************************************************" 11 | echo "***************Docker Container ${id}***************" 12 | echo "***********************************************************" 13 | docker ps -af id="${id}" --no-trunc 14 | echo "---- docker logs ----" 15 | docker logs "${id}" | tail -n 10 || echo "It is not possible to grab the logs of ${id}" 16 | docker inspect "${id}" > docker-info/"${id}"-docker-inspect.json 17 | done 18 | echo "***********************************************************" 19 | echo "***************Docker Stats***************" 20 | echo "***********************************************************" 21 | docker stats --no-trunc --no-stream 22 | echo "*******************************************************" 23 | echo "***************Docker Containers Summary***************" 24 | echo "*******************************************************" 25 | docker ps -a 26 | -------------------------------------------------------------------------------- /docker/logstash/pipeline/apm.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | 6 | kafka { 7 | id => "apm-kafka" 8 | bootstrap_servers => ["kafka:9092"] 9 | topics_pattern => "apm.*" 10 | codec => "json" 11 | } 12 | } 13 | 14 | filter { 15 | # for older libbeat/apm-server - https://github.com/elastic/apm-server/issues/1792 16 | if [@metadata][beat] == "apm-server" { 17 | mutate { 18 | replace => { "[@metadata][beat]" => "apm" } 19 | } 20 | } 21 | 22 | if [@metadata][beat] == "apm" and [processor] { 23 | mutate { 24 | add_field => { "[@metadata][index_suffix]" => "-%{[processor][event]}" } 25 | } 26 | } else { 27 | # onboarding events originally did not set processor.event - https://github.com/elastic/apm-server/pull/1159 28 | mutate { 29 | add_field => { "[@metadata][index_suffix]" => "" } 30 | } 31 | } 32 | } 33 | 34 | output { 35 | elasticsearch { 36 | hosts => ["${ELASTICSEARCH_HOSTS:elasticsearch:9200}"] 37 | 
index => "%{[@metadata][beat]}-%{[@metadata][version]}%{[@metadata][index_suffix]}" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /docker/logstash/pipeline-6.x-compat/apm.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | 6 | kafka { 7 | id => "apm-kafka" 8 | bootstrap_servers => ["kafka:9092"] 9 | topics_pattern => "apm.*" 10 | codec => "json" 11 | } 12 | } 13 | 14 | filter { 15 | # for older libbeat/apm-server - https://github.com/elastic/apm-server/issues/1792 16 | if [@metadata][beat] == "apm-server" { 17 | mutate { 18 | replace => { "[@metadata][beat]" => "apm" } 19 | } 20 | } 21 | 22 | if [@metadata][beat] == "apm" and [processor] { 23 | mutate { 24 | add_field => { "[@metadata][index_suffix]" => "-%{[processor][event]}" } 25 | } 26 | } else { 27 | # onboarding events originally did not set processor.event - https://github.com/elastic/apm-server/pull/1159 28 | mutate { 29 | add_field => { "[@metadata][index_suffix]" => "" } 30 | } 31 | } 32 | } 33 | 34 | output { 35 | elasticsearch { 36 | hosts => ["${ELASTICSEARCH_HOSTS:elasticsearch:9200}"] 37 | index => "%{[@metadata][beat]}-%{[@metadata][version]}%{[@metadata][index_suffix]}-%{+YYYY.MM.dd}" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /docker/opbeans/ruby/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -ex 3 | if [ -f /local-install/Gemfile ]; then 4 | echo "Installing from local folder" 5 | # copy to folder inside container to ensure we're not polluting the local folder 6 | cp -r /local-install ~ 7 | cd ~/local-install && bundle 8 | cd - 9 | elif [ -n "${RUBY_AGENT_VERSION}" ]; then 10 | gem install elastic-apm -v "${RUBY_AGENT_VERSION}" 11 | elif [ -n "${RUBY_AGENT_BRANCH}" ]; then 12 | gem install specific_install 13 | if [ -z "${RUBY_AGENT_REPO}" ]; then 14 | RUBY_AGENT_REPO="elastic/apm-agent-ruby" 15 | fi 16 | # This is required with the alpine version 17 | apk --no-cache add git 18 | echo "Installing ${RUBY_AGENT_REPO}:${RUBY_AGENT_BRANCH} from Github" 19 | 20 | # Support branches/tags and refs 21 | set +e 22 | if gem specific_install https://github.com/${RUBY_AGENT_REPO}.git -b "${RUBY_AGENT_BRANCH}" ; then 23 | set -e 24 | gem specific_install https://github.com/${RUBY_AGENT_REPO}.git -r "${RUBY_AGENT_BRANCH}" 25 | fi 26 | 27 | else 28 | gem install elastic-apm 29 | fi 30 | exec "$@" 31 | -------------------------------------------------------------------------------- /docker/opbeans/java/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | if [[ -f /local-install/pom.xml ]]; then 3 | echo "Using Java agent from local folder" 4 | rm -f /app/elastic-apm-agent.jar 5 | # Extract current version without using maven which is not available in this image 6 | JAVA_AGENT_LOCAL_VERSION=$(xmllint --xpath '/*[local-name()="project"]/*[local-name()="version"]/text()' /local-install/pom.xml) 7 | 8 | cp -v "/local-install/elastic-apm-agent/target/elastic-apm-agent-${JAVA_AGENT_LOCAL_VERSION}.jar" /app/elastic-apm-agent.jar 9 | # copy to folder inside container to ensure we're not polluting the local folder 10 | cp -r /local-install ~ 11 | cd ~/local-install && python setup.py install 12 | cd - 13 | elif [[ $JAVA_AGENT_VERSION ]]; then 14 | echo "Downloading Java agent $JAVA_AGENT_VERSION from maven 
central" 15 | rm -f /app/elastic-apm-agent.jar 16 | curl -o /app/elastic-apm-agent.jar -L "https://repo1.maven.org/maven2/co/elastic/apm/elastic-apm-agent/$JAVA_AGENT_VERSION/elastic-apm-agent-$JAVA_AGENT_VERSION.jar" 17 | else 18 | echo "Using Java agent from the docker image" 19 | fi 20 | 21 | exec "$@" 22 | -------------------------------------------------------------------------------- /docker/kibana_src/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG NODE_VERSION= 2 | FROM node:${NODE_VERSION} 3 | 4 | ARG GID=1001 5 | ARG UID=1001 6 | RUN mkdir -p /usr/share/kibana/config 7 | WORKDIR /usr/share/kibana 8 | RUN groupadd -f --gid ${GID} kibana \ 9 | && useradd --uid ${UID} --gid ${GID} --groups 0 --home-dir /usr/share/kibana --no-create-home kibana 10 | RUN chown -R kibana:0 /usr/share/kibana 11 | # Bazel is installed at global level so we need permissions on /usr/local 12 | RUN chown -R kibana:0 /usr/local 13 | USER kibana 14 | 15 | RUN git config --global user.email "none@example.com" 16 | RUN git config --global user.name "None" 17 | RUN git init && git add . && git commit -a -m "init commit" 18 | ENV HOME=/usr/share/kibana 19 | ENV NODE_OPTIONS="--max-old-space-size=4096" 20 | ENV FORCE_COLOR=1 21 | ENV BABEL_DISABLE_CACHE=true 22 | 23 | EXPOSE 5601 24 | ENTRYPOINT ["/bin/bash", "-c"] 25 | CMD ["yarn kbn bootstrap && yarn start -c /usr/share/kibana/config/kibana_src.yml -c /usr/share/kibana/config/kibana.yml --no-dev-config"] 26 | 27 | 28 | HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=300 CMD curl -sSL http://127.0.0.1:5601/login|grep -v 'Kibana server is not ready yet' 29 | -------------------------------------------------------------------------------- /docker/statsd/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 Chatham Financial Corp 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | 'Software'), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 17 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 18 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 19 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 20 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
21 | -------------------------------------------------------------------------------- /docker/opbeans/python/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # uninstall any previously installed version of elastic-apm 3 | pip uninstall -y elastic-apm 4 | if [[ -f /local-install/setup.py ]]; then 5 | echo "Installing from local folder" 6 | # copy to folder inside container to ensure were not polluting the local folder 7 | cp -r /local-install ~ 8 | cd ~/local-install && python setup.py install 9 | cd - 10 | elif [ -n "$PYTHON_AGENT_VERSION" ] && [ "$PYTHON_AGENT_VERSION" != "latest" ]; then 11 | echo "Installing version ${PYTHON_AGENT_VERSION}" 12 | pip install -q -U elastic-apm=="$PYTHON_AGENT_VERSION" 13 | elif [[ "$PYTHON_AGENT_BRANCH" ]]; then 14 | if [[ -z "${PYTHON_AGENT_REPO}" ]]; then 15 | PYTHON_AGENT_REPO="elastic/apm-agent-python" 16 | fi 17 | echo "Installing from ${PYTHON_AGENT_REPO}#${PYTHON_AGENT_BRANCH}" 18 | pip install -U "https://github.com/${PYTHON_AGENT_REPO}/archive/${PYTHON_AGENT_BRANCH}.zip" 19 | else 20 | echo "Installing latest version from PyPI" 21 | pip install -q -U elastic-apm 22 | fi 23 | rm -f celerybeat.pid 24 | python manage.py migrate 25 | python manage.py sqlsequencereset opbeans | python manage.py dbshell 26 | exec "$@" 27 | -------------------------------------------------------------------------------- /docker/opbeans/node/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -ex 3 | if [ -f /local-install/package.json ]; then 4 | echo "Installing elastic-apm-node from local folder (--opbeans-node-agent-local-repo)" 5 | # Copy to a folder inside container to ensure we're not polluting the 6 | # local folder. Skip possibly huge dirs to speed this up. 7 | rsync -a /local-install/ ~/local-install/ --exclude node_modules --exclude build --exclude .git 8 | # Install elastic-apm-node from this copied dir. 
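# A hedged invocation sketch for the branch-install path further below; the
# variable values are illustrative, and the trailing command mirrors the
# image's CMD from docker/opbeans/node/Dockerfile:
#   NODE_AGENT_REPO=elastic/apm-agent-nodejs NODE_AGENT_BRANCH=main \
#     /app/entrypoint.sh pm2-runtime ecosystem-workload.config.js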
9 | npm install ~/local-install 10 | npm ls elastic-apm-node 11 | elif [ -n "${NODE_AGENT_VERSION}" ]; then 12 | echo "Installing ${NODE_AGENT_VERSION} from npm" 13 | npm install elastic-apm-node@"${NODE_AGENT_VERSION}" 14 | elif [ -n "${NODE_AGENT_BRANCH}" ]; then 15 | if [ -z "${NODE_AGENT_REPO}" ]; then 16 | NODE_AGENT_REPO="elastic/apm-agent-nodejs" 17 | fi 18 | echo "Installing ${NODE_AGENT_REPO}:${NODE_AGENT_BRANCH} from Github" 19 | npm install "https://github.com/${NODE_AGENT_REPO}/archive/${NODE_AGENT_BRANCH}.tar.gz" 20 | fi 21 | if [ -f /sourcemaps/README.md ]; then 22 | rm -f /sourcemaps/*.map 23 | cp -f ./client/build/static/js/*.map /sourcemaps/ 24 | chmod 0666 /sourcemaps/*.map 25 | fi 26 | exec "$@" 27 | -------------------------------------------------------------------------------- /docker/packetbeat/packetbeat.6.x-compat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packetbeat.interfaces.snaplen: 1514 3 | packetbeat.interfaces.type: af_packet 4 | packetbeat.interfaces.buffer_size_mb: 250 5 | packetbeat.protocols: 6 | - type: dns 7 | ports: [53] 8 | include_authorities: true 9 | include_additionals: true 10 | - type: http 11 | ports: [8080, 80, 8000, 5000, 3000, 8200, 9200, 5601] 12 | tags: ["http"] 13 | send_headers: true 14 | send_all_headers: true 15 | split_cookie: true 16 | send_request: true 17 | send_response: true 18 | - type: mysql 19 | ports: [3306] 20 | - type: icmp 21 | enabled: true 22 | packetbeat.flows: 23 | timeout: 30s 24 | period: 10s 25 | packetbeat.procs: 26 | enabled: true 27 | system.hostfs: "/hostfs/proc" 28 | fields: {interface: "${INTERFACE:eth0}"} 29 | processors: 30 | - add_docker_metadata: 31 | host: "unix:///var/run/docker.sock" 32 | - add_host_metadata: 33 | # extremely verbose if enabled 34 | netinfo.enabled: false 35 | xpack.monitoring.enabled: true 36 | queue.mem: 37 | events: 20000 38 | setup.kibana.host: '${KIBANA_HOST:kibana}:${KIBANA_PORT:5601}' 39 | setup.kibana.username: '${KIBANA_USERNAME:admin}' 40 | setup.kibana.password: '${KIBANA_PASSWORD:changeme}' 41 | -------------------------------------------------------------------------------- /scripts/tls/kibana/kibana.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDMzCCAhugAwIBAgIUBOfTBk0o0Hj6tkV/jlqyf5WT2QgwDQYJKoZIhvcNAQEL 3 | BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l 4 | cmF0ZWQgQ0EwHhcNMjAwNDE2MTAxMjUzWhcNMjMwNDE2MTAxMjUzWjARMQ8wDQYD 5 | VQQDEwZraWJhbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChyREW 6 | c5G6AjvvXiz5B4k0ZeQjaPBHEQKcM4C76p2dhHBwbuShtpvXPlH5ZUXWLR5xc1lp 7 | BPsriz1TZruNnp0VVfap4vf7LZSlG1xraGQKD9Xf/X3L34I22Y9HTcvENAuiiDiA 8 | A87d/cZ6Ouu//eFCXQfKvSd0NqLYM8I/QK4yXWBM6mKwQtgD1bjR1uGMzEUfeXJP 9 | nDOsTbNDdH/oPRcORYlkLZj99CregkRX4+sRtQ73a+b6H6BHGfDyvdDKKp/bupOB 10 | wE9YI/s5ckT5K/QpCngQnQa1aSFnPOCqo2g3J9eoZtxyJvYWF67dO2mSNyzrJ4Sc 11 | ctdh85Q+eNBEFaW3AgMBAAGjYDBeMB0GA1UdDgQWBBRfpSam3yXElaxBBCLF5LqM 12 | cbgUrzAfBgNVHSMEGDAWgBRSFv6t+QqPJBdGPEYyAjIN045E/jARBgNVHREECjAI 13 | ggZraWJhbmEwCQYDVR0TBAIwADANBgkqhkiG9w0BAQsFAAOCAQEAH+x/IOkTo4lR 14 | iJXNItWea9J1aAc44xSXMXXvQLjSuHRdiBoAuwIJmt7B+dKv1ztlhnKVqHQqXm8d 15 | C6B0DCOT95l8c6rzGMPfZJ2S1Lv27Nn09ou829i9kEuMjZI5d/6JAQTy0LcZDUCw 16 | HFUGs+jsKyR/3V1NqWuFBQ4qhwWzkK6ff7mNx4CiHQ6z5RiQs0InMDQYD5VqMdd2 17 | HpIAJoCdokReMmNW8uDwqmSSKras2SAJieZFAFlA7VkXLuBQVXQr2r54qzz3yEHc 18 | GYNCWOYauqpsMDCKsOGGTBiyX0dH3h/2cno8kTbWR0Qt8l0YrXf9LsDunt/47Y81 19 | nazllhW62w== 20 | -----END 
CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /docker/tests/tests.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | # shellcheck disable=SC2031,SC2030 3 | 4 | load 'test_helper/bats-support/load' 5 | load 'test_helper/bats-assert/load' 6 | load test_helpers 7 | 8 | CONTAINER="it_${DOCKERFILE//\//-}" 9 | IMAGE="docker.elastic.co/observability-ci/${CONTAINER}" 10 | 11 | @test "${DOCKERFILE} - build image" { 12 | cd "$BATS_TEST_DIRNAME/.." 13 | # Simplify the makefile as it does fail with '/bin/sh: 1: Bad substitution' in the CI 14 | if [ ! -e "${DOCKERFILE}" ] ; then 15 | DOCKERFILE="${DOCKERFILE//-//}" 16 | fi 17 | run docker build --rm -t "${IMAGE}" "${DOCKERFILE}" 18 | assert_success 19 | } 20 | 21 | @test "${DOCKERFILE} - clean test containers" { 22 | cleanup "$CONTAINER" 23 | } 24 | 25 | @test "${DOCKERFILE} - create test container" { 26 | run docker run -d --name "$CONTAINER" -P "${IMAGE}" 27 | assert_success 28 | } 29 | 30 | @test "${DOCKERFILE} - test container with 0 as exitcode" { 31 | if [ "${DOCKERFILE}" = "opbeans-go" ]; then 32 | skip "${DOCKERFILE} does require some other docker services." 33 | fi 34 | sleep 1 35 | run docker inspect -f '{{.State.ExitCode}}' "$CONTAINER" 36 | assert_output '0' 37 | } 38 | 39 | @test "${DOCKERFILE} - clean test containers afterwards" { 40 | cleanup "$CONTAINER" 41 | } 42 | -------------------------------------------------------------------------------- /scripts/tls/ca/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDSTCCAjGgAwIBAgIUY8LkKoB35T6rQiZXX6o5c+akkUMwDQYJKoZIhvcNAQEL 3 | BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l 4 | cmF0ZWQgQ0EwHhcNMjAwNDE2MTAxMjUzWhcNMjMwNDE2MTAxMjUzWjA0MTIwMAYD 5 | VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC 6 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ6MwCwJnSd4z3As3WXoK/60 7 | HDyVZlJjxp4e9w76U0xkJ9it0ofD29Gc7xV207bz6mef1pScQN6Rmc+B93FyMk8N 8 | A+pCiS38YJtXLg/wFe+JBNyVMzvniY4xCnYtuf+byyMPKBXyjRpqdQnmTYeS9Oc5 9 | 3B24BjDGriy3zGHmFTMfdhuT9o3sFlPA57GwJqYnoX6lhMg9EvXgx/ECO495/wO3 10 | 25A3oGJR7hpeWR7rtE/JuzhJYLFPcWS+3srdY3Elv3BorjJiPgBpvA8wOdXSoT9e 11 | HT1HYZaEyQl+fv4o+Oz+IhjYRC+0GpsKwNmzZUgl4SXIxrV8qRulmcOmQi9QSd0C 12 | AwEAAaNTMFEwHQYDVR0OBBYEFFIW/q35Co8kF0Y8RjICMg3TjkT+MB8GA1UdIwQY 13 | MBaAFFIW/q35Co8kF0Y8RjICMg3TjkT+MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 14 | hvcNAQELBQADggEBAJOi2K7jDb+c3w1tZ3nxxRdLf2mZwYQtBBB49j/luNLPpA6N 15 | vceiij7S7DT3DeKFzZ457w4efpLuD02bqyx2cWqqlcsFKyMA03Q201HVfORaAwTG 16 | hHZ0IJFR/vVoT1IlUp6UW5/m0YKxLY7KLpt/CJC27arp45ZzkdmDXXK9bbipYZ7y 17 | Za+8VzrYPw581E7dUd4c1xlclqeueX1gBGaflP7YOzNu3yLJWXEOdrZv3Db4daR6 18 | 1tOh15im1bjjU7sJVEcaQYHZ7NjDxUEuqSeUse4wKHfugswDNnGlsowEmazhz0lU 19 | mo3aHbxCioBQYoCKSOVNib0u+zf0UrFFdhOC0Zw= 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /scripts/tls/elasticsearch/elasticsearch.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDQTCCAimgAwIBAgIUHk8IWZxA3PE1XPPClAmWL8oQSQMwDQYJKoZIhvcNAQEL 3 | BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l 4 | cmF0ZWQgQ0EwHhcNMjAwNDE2MTAxMjUzWhcNMjMwNDE2MTAxMjUzWjAYMRYwFAYD 5 | VQQDEw1lbGFzdGljc2VhcmNoMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 6 | AQEAq6aOA3JNw/vfxLkk4+1vu0GlcuB4Z/dggyuvagFEYpluIEULBWXeKw9109+G 7 | 
VHDHadzrS4e0A4hxcGZNogaYpkpc0LK8NE7fx4Ei6foCVJ9w1OxzXKoT2kmCyULs 8 | n176scFulFYw7sMTeOdwMgiBUYwo6rR2rsb8lPrw3MUWjAncvIxuivY2XRecb54A 9 | ZaUAAT4+my3rX4kwvbbgSzQo490ioZhQCO1RJ0TZ22EqgBaAypAuSm+9wAKQS/ku 10 | +FNwAOiEUiynpBjo8tncmKci3udz/yAvMweeHrNmma7dCjLZgY/8PzK/jrBzOIYH 11 | 2K9UndmlugWQk68wLkL7btkJOQIDAQABo2cwZTAdBgNVHQ4EFgQUsXg82HIGKy1U 12 | rhoTqOj3dH2q7qMwHwYDVR0jBBgwFoAUUhb+rfkKjyQXRjxGMgIyDdOORP4wGAYD 13 | VR0RBBEwD4INZWxhc3RpY3NlYXJjaDAJBgNVHRMEAjAAMA0GCSqGSIb3DQEBCwUA 14 | A4IBAQAPHV0kfXw7/L18dvMypeYDLkFDwoLK7dWZYce27asNcYqeicdiT/cYP0ts 15 | s6tPLTZ7fMpbKI6WUPuHzh+oQ+x8/QjVSjO6bp5mIOKKr9gZAkZ0Ctb4ImZqRcgO 16 | 2yKsdTtixOdo4K/Teq1LCHR7DfD+hWsb+avO5MhL19y0o03L/RC4HBhh9EHJzJ3R 17 | Cf7FaSRzJUn1lQr6xIMZExYjobokyPsqF1YiqMphrOxbU1R5XxhNC46Ya9Z04Lpg 18 | atkAyT178Hxf9RzH1qCYGZlM5mGLfnHRM9b6DuhWfMO1hSlBMlgHG1JMJtsi5jRs 19 | o0um95TzUTf9GxicU+Ojazj/gmCR 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /docker/packetbeat/packetbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packetbeat.interfaces.snaplen: 1514 3 | packetbeat.interfaces.type: af_packet 4 | packetbeat.interfaces.buffer_size_mb: 250 5 | packetbeat.protocols: 6 | - type: dns 7 | ports: [53] 8 | include_authorities: true 9 | include_additionals: true 10 | - type: http 11 | ports: [8080, 80, 8000, 5000, 3000, 8200, 9200, 5601] 12 | tags: ["http"] 13 | send_headers: true 14 | send_all_headers: true 15 | split_cookie: true 16 | send_request: true 17 | send_response: true 18 | - type: mysql 19 | ports: [3306] 20 | - type: icmp 21 | enabled: true 22 | packetbeat.flows: 23 | timeout: 30s 24 | period: 10s 25 | packetbeat.procs: 26 | enabled: true 27 | system.hostfs: "/hostfs/proc" 28 | fields: {interface: "${INTERFACE:eth0}"} 29 | processors: 30 | - add_docker_metadata: 31 | host: "unix:///var/run/docker.sock" 32 | - add_host_metadata: 33 | # extremely verbose if enabled 34 | netinfo.enabled: false 35 | monitoring.enabled: true 36 | queue.mem: 37 | events: 20000 38 | setup.kibana.host: '${KIBANA_HOST:kibana}:${KIBANA_PORT:5601}' 39 | setup.kibana.username: '${KIBANA_USERNAME:admin}' 40 | setup.kibana.password: '${KIBANA_PASSWORD:changeme}' 41 | 42 | http.enabled: true 43 | http.host: localhost 44 | http.port: 5066 45 | -------------------------------------------------------------------------------- /scripts/tests/config/test_apm_managed.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apm-server: 3 | container_name: localtesting_8.2.0_apm-managed 4 | image: docker.elastic.co/beats/elastic-agent:8.2.0-SNAPSHOT 5 | depends_on: 6 | kibana: 7 | condition: service_healthy 8 | environment: 9 | FLEET_ELASTICSEARCH_HOST: "http://elasticsearch.example.com:9200" 10 | FLEET_SERVER_ELASTICSEARCH_INSECURE: '1' 11 | FLEET_SERVER_ENABLE: '1' 12 | FLEET_SERVER_HOST: 0.0.0.0 13 | FLEET_SERVER_INSECURE_HTTP: '1' 14 | FLEET_SERVER_POLICY_ID: fleet-server-apm-policy 15 | FLEET_SERVER_PORT: '8220' 16 | FLEET_SERVER_SERVICE_TOKEN: foo_token_server 17 | KIBANA_FLEET_HOST: "http://kibana.example.com:5601" 18 | KIBANA_FLEET_SERVICE_TOKEN: foo_token_kibana 19 | KIBANA_FLEET_SETUP: '1' 20 | healthcheck: 21 | test: 22 | - CMD 23 | - /bin/true 24 | labels: 25 | - co.elastic.apm.stack-version=8.2.0 26 | logging: 27 | driver: json-file 28 | options: 29 | max-file: '5' 30 | max-size: 2m 31 | ports: 32 | - 127.0.0.1:8220:8220 33 | - 127.0.0.1:8201:8200 34 | volumes: 35 | - 
/var/run/docker.sock:/var/run/docker.sock 36 | - ./scripts/tls/apm-server/cert.crt:/usr/share/apm-server/config/certs/tls.crt 37 | - ./scripts/tls/apm-server/key.pem:/usr/share/apm-server/config/certs/tls.key 38 | -------------------------------------------------------------------------------- /docker/elasticsearch/roles.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apm_server: 3 | cluster: ['manage_ilm', 'manage_security', 'manage_api_key'] 4 | indices: 5 | - names: ['apm-*', 'logs-apm*', 'metrics-apm*', 'traces-apm*'] 6 | privileges: ['write', 'create_index', 'manage', 'manage_ilm'] 7 | applications: 8 | - application: 'apm' 9 | privileges: ['sourcemap:write', 'event:write', 'config_agent:read'] 10 | resources: '*' 11 | beats: 12 | cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm', 'manage_security', 'manage_api_key'] 13 | indices: 14 | - names: ['filebeat-*', 'shrink-filebeat-*'] 15 | privileges: ['all'] 16 | filebeat: 17 | cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] 18 | indices: 19 | - names: ['filebeat-*', 'shrink-filebeat-*'] 20 | privileges: ['all'] 21 | heartbeat: 22 | cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] 23 | indices: 24 | - names: ['heartbeat-*', 'shrink-heartbeat-*'] 25 | privileges: ['all'] 26 | metricbeat: 27 | cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] 28 | indices: 29 | - names: ['metricbeat-*', 'shrink-metricbeat-*'] 30 | privileges: ['all'] 31 | opbeans: 32 | indices: 33 | - names: ['opbeans-*'] 34 | privileges: ['write', 'read'] 35 | -------------------------------------------------------------------------------- /docker/heartbeat/heartbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | output.elasticsearch: 3 | hosts: ["${ELASTICSEARCH_HOSTS:elasticsearch:9200}"] 4 | 5 | setup.kibana: 6 | host: "${KIBANA_HOST:kibana:5601}" 7 | 8 | processors: 9 | - add_observer_metadata: 10 | 11 | heartbeat.monitors: 12 | - type: icmp 13 | schedule: '*/5 * * * * * *' 14 | hosts: ["google.com"] 15 | - type: tcp 16 | schedule: '@every 5s' 17 | hosts: ["myhost:7"] # default TCP Echo Protocol 18 | check.send: "Check" 19 | check.receive: "Check" 20 | - type: http 21 | schedule: '@every 5s' 22 | urls: ["https://www.elastic.co/"] 23 | check.response.status: 200 24 | heartbeat.scheduler: 25 | limit: 10 26 | 27 | heartbeat.autodiscover: 28 | providers: 29 | - type: docker 30 | templates: 31 | - condition: 32 | contains: 33 | docker.container.image: redis 34 | config: 35 | - type: tcp 36 | hosts: ["${data.host}:${data.port}"] 37 | schedule: "@every 1s" 38 | timeout: 1s 39 | - condition: 40 | contains: 41 | docker.container.image: opbeans 42 | config: 43 | - type: http 44 | urls: ["http://${data.host}:${data.port}"] 45 | schedule: "@every 5s" 46 | timeout: 1s 47 | 48 | http.enabled: true 49 | http.host: localhost 50 | http.port: 5066 51 | -------------------------------------------------------------------------------- /docker/apm-server/pipelines/default.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "apm", 4 | "body": { 5 | "description" : "Default enrichment for APM events", 6 | "processors" : [ 7 | { 8 | "pipeline": { 9 | "name": "apm_user_agent" 10 | } 11 | }, 12 | { 13 | "pipeline": { 14 | "name": "apm_user_geo" 15 | } 16 | } 17 | ] 18 | } 19 | }, 20 | { 21 
| "id": "apm_user_agent", 22 | "body": { 23 | "description" : "Add user agent information for APM events", 24 | "processors" : [ 25 | { 26 | "user_agent" : { 27 | "field": "user_agent.original", 28 | "target_field": "user_agent", 29 | "ignore_missing": true, 30 | "ignore_failure": true 31 | } 32 | } 33 | ] 34 | } 35 | }, 36 | { 37 | "id": "apm_user_geo", 38 | "body": { 39 | "description" : "Add user geo information for APM events", 40 | "processors" : [ 41 | { 42 | "geoip" : { 43 | "database_file": "GeoLite2-City.mmdb", 44 | "field": "client.ip", 45 | "target_field": "client.geo", 46 | "ignore_missing": true, 47 | "on_failure": [ 48 | { 49 | "remove": { 50 | "field": "client.ip", 51 | "ignore_missing": true, 52 | "ignore_failure": true 53 | } 54 | } 55 | ] 56 | } 57 | } 58 | ] 59 | } 60 | } 61 | ] 62 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v2.2.3 4 | hooks: 5 | - id: check-case-conflict 6 | exclude: ^target/ 7 | - id: check-executables-have-shebangs 8 | exclude: (^scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/scripts/download-sample-docs.ts$|^target/) 9 | - id: check-json 10 | exclude: ^target/ 11 | - id: check-merge-conflict 12 | exclude: ^target/ 13 | - id: check-xml 14 | exclude: ^target/ 15 | - id: end-of-file-fixer 16 | exclude: ^target/ 17 | 18 | - repo: https://github.com/detailyang/pre-commit-shell.git 19 | rev: master 20 | hooks: 21 | - id: shell-lint 22 | name: "Shellscript: lint" 23 | stages: [commit] 24 | types: [shell] 25 | exclude: (^mvnw$|^target/) 26 | 27 | - repo: https://github.com/adrienverge/yamllint.git 28 | rev: v1.25.0 29 | hooks: 30 | - id: yamllint 31 | name: "Yaml: lint" 32 | args: ['-c','.ci/.yamlint.yml'] 33 | stages: [commit] 34 | types: [yaml] 35 | exclude: (^.pre-commit-config.yaml$|^.ci/.yamlint.yml$|^target/) 36 | 37 | - repo: https://github.com/elastic/apm-pipeline-library 38 | rev: current 39 | hooks: 40 | - id: check-bash-syntax 41 | - id: check-abstract-classes-and-trait 42 | - id: check-jsonslurper-class 43 | - id: check-unicode-non-breaking-spaces 44 | - id: remove-unicode-non-breaking-spaces 45 | - id: check-en-dashes 46 | - id: remove-en-dashes 47 | -------------------------------------------------------------------------------- /docker/opbeans/java/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG OPBEANS_JAVA_IMAGE=opbeans/opbeans-java 2 | ARG OPBEANS_JAVA_VERSION=latest 3 | # setting --build-arg JAVA_AGENT_BRANCH= causes the agent to be built from source 4 | # instead of using the agent which comes pre-built with opbeans-java:latest 5 | ARG JAVA_AGENT_REPO=elastic/apm-agent-java 6 | ARG JAVA_AGENT_BRANCH= 7 | 8 | FROM maven:3.6.3-adoptopenjdk-11 9 | ENV ELASTIC_APM_ENABLE_LOG_CORRELATION=true 10 | ENV ELASTIC_APM_LOG_LEVEL=DEBUG 11 | 12 | RUN mkdir /agent \ 13 | # making sure there is at least one file to COPY (otherwise docker complains) 14 | && touch /agent/ignore 15 | COPY build-agent.sh . 
16 | # noop if JAVA_AGENT_BRANCH is not set 17 | RUN ./build-agent.sh "${JAVA_AGENT_REPO}" "${JAVA_AGENT_BRANCH}" 18 | 19 | FROM ${OPBEANS_JAVA_IMAGE}:${OPBEANS_JAVA_VERSION} 20 | # replaces the /agent/elastic-apm-agent.jar if it has been built by build-agent.sh 21 | COPY --from=0 /agent/* /app/ 22 | COPY entrypoint.sh /app/entrypoint.sh 23 | CMD java -javaagent:/app/elastic-apm-agent.jar -Dspring.profiles.active=customdb\ 24 | -Dserver.port=${OPBEANS_SERVER_PORT:-3002}\ 25 | -Dspring.datasource.url=${DATABASE_URL:-jdbc:postgresql://postgres/opbeans?user=postgres&password=verysecure}\ 26 | -Dspring.datasource.driverClassName=${DATABASE_DRIVER:-org.postgresql.Driver}\ 27 | -Dspring.jpa.database=${DATABASE_DIALECT:-POSTGRESQL}\ 28 | -jar /app/app.jar 29 | ENTRYPOINT ["/bin/bash", "/app/entrypoint.sh"] 30 | -------------------------------------------------------------------------------- /.ci/bump-elastic-stack-snapshot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bump elastic-stack to latest snapshot version 3 | 4 | actions: 5 | default: 6 | title: '[updatecli] update elastic stack version for testing {{ source "latestVersion" }}' 7 | kind: github/pullrequest 8 | spec: 9 | labels: 10 | - automation 11 | - dependencies 12 | - backport-skip 13 | scmid: default 14 | 15 | scms: 16 | default: 17 | kind: github 18 | spec: 19 | user: '{{ requiredEnv "GIT_USER" }}' 20 | email: '{{ requiredEnv "GIT_EMAIL" }}' 21 | owner: elastic 22 | repository: apm-integration-testing 23 | token: '{{ requiredEnv "GITHUB_TOKEN" }}' 24 | username: '{{ requiredEnv "GIT_USER" }}' 25 | branch: main 26 | 27 | sources: 28 | latestVersion: 29 | name: Get latest snapshot build for main 30 | kind: json 31 | spec: 32 | file: https://storage.googleapis.com/artifacts-api/snapshots/main.json 33 | key: .build_id 34 | 35 | major-minor-patch: 36 | name: Get major-minor-patch version 37 | kind: shell 38 | dependson: 39 | - latestVersion 40 | transformers: 41 | - findsubmatch: 42 | pattern: '^(\d+.\d+.\d+)-.+$' 43 | captureindex: 1 44 | spec: 45 | command: echo {{ source "latestVersion" }} 46 | 47 | targets: 48 | update-cli: 49 | name: "Update cli.py - main" 50 | kind: file 51 | sourceid: major-minor-patch 52 | scmid: default 53 | spec: 54 | file: scripts/modules/cli.py 55 | matchpattern: '("main"|"master"): "[0-9]+.[0-9]+.[0-9]+"' 56 | replacepattern: '$1: "{{ source "major-minor-patch" }}"' 57 | -------------------------------------------------------------------------------- /docker/opbeans/java/build-agent.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -xe 3 | JAVA_AGENT_REPO=${1} 4 | JAVA_AGENT_BRANCH=${2} 5 | 6 | ARTIFACT_ID=elastic-apm-agent 7 | 8 | function mavenRun() { 9 | ## If the settings.xml file exists and contains a `ci` profile, use it 10 | SETTINGS=.ci/settings.xml 11 | if [ -e ${SETTINGS} ] ; then 12 | if grep -q 'ci' ${SETTINGS} ; then 13 | export MAVEN_CONFIG="-s ${SETTINGS} -Pci ${MAVEN_CONFIG}" 14 | fi 15 | fi 16 | mvn -q --batch-mode \ 17 | -DskipTests=true \ 18 | -Dmaven.javadoc.skip=true \ 19 | -Dhttps.protocols=TLSv1.2 \ 20 | -Dmaven.wagon.http.retryHandler.count=10 \ 21 | -Dmaven.wagon.httpconnectionManager.ttlSeconds=25 \ 22 | -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn \ 23 | -Dmaven.repo.local="${M2_REPOSITORY_FOLDER}" \ 24 | "$@" 25 | } 26 | 27 | if [ -n "${JAVA_AGENT_BRANCH}" ] ; then 28 | # build agent from source, install to ~/.m2 repo 29 | git
clone "https://github.com/${JAVA_AGENT_REPO}.git" /apm-agent-java 30 | cd /apm-agent-java 31 | git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' 32 | git checkout "${JAVA_AGENT_BRANCH}" 33 | 34 | mvn dependency:go-offline --fail-never -q -B 35 | if ! mavenRun clean install ; then 36 | echo 'Sleep and try again' 37 | sleep 5 38 | mavenRun install 39 | fi 40 | # shellcheck disable=SC2016 41 | VERSION=$(mvn -q -Dexec.executable="echo" -Dexec.args='${project.version}' --non-recursive org.codehaus.mojo:exec-maven-plugin:1.3.1:exec) 42 | export VERSION="${VERSION}" 43 | cp "/root/.m2/repository/co/elastic/apm/${ARTIFACT_ID}/${VERSION}/${ARTIFACT_ID}-${VERSION}.jar" /agent/elastic-apm-agent.jar 44 | fi 45 | -------------------------------------------------------------------------------- /scripts/tls/apm-server/cert.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEpjCCAo4CCQDR9oXvJbopHjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDDAph 3 | cG0tc2VydmVyMB4XDTE5MTExOTE1MjE0NVoXDTI5MTExNjE1MjE0NVowFTETMBEG 4 | A1UEAwwKYXBtLXNlcnZlcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB 5 | ANduj3tyeBIHj0Bf5aKMRImhRbkAaQ2p6T0WsHKlicd1P4/D5l783+vVsbwprRqR 6 | qXAUsUWcUSYJXBX1qtC2MtKqi4xYUTAyQV5dgrMoCV+vtZY31SK4kolumd1vVMh+ 7 | po+IwueLvLMFK1tQGIXlJblSDYVauIt5rp79IIhWOY/YpcQy9RaxykljTYTbPjLW 8 | m3T92bow1nLh5GL3ThJEAkLO+hkJv9716+YRWYtPcojiGzpLjFgF50MoP4Lilm9U 9 | r2tBnqpvb2PwE1kkly8DDBtcg+HM4tgGsbdWo2Pgp82ARV4DL+JlNJ+SVQZAmTbc 10 | 3LMwxnUJtuKMeh2rwb9HOyuONXfF1PiEzyDhAlabyS6toAGy1mlMAop1ClO1wV5O 11 | Ayy47TeD6ziNyMKB7/XHdW4rb16K6j6EV27Bg2ZK6Vrfkwm3aRbpztfVRMX+HMUp 12 | ktH+V2OwJoP7l7lzw/q8yMdopG57zRJa1dx8NWP/UKi8Ej+87DYyWJODiNHD7PM7 13 | 9vfd47lNcWxw+p7ntEpnn6EeW2r7SlmfhtdIxL2DiTiKAq9Ktyi9cFnGnDfSDJST 14 | T1G1vIDdG33Vt2Y5+wqzCGbYyMsAOaMdXZSeniXXFR4GX7iz+AGoKojBbmoo9VqP 15 | mvbudNU+ysha4IJvTfOczJZgstxCXG+MXbEXFSgysImFAgMBAAEwDQYJKoZIhvcN 16 | AQELBQADggIBAFh2YxRT6PaAXDq38rm25I91fCP9PzVPDuIkn9wl85e7avuh6FZi 17 | R0nQG6+lB1i8XSm9UMl9+ISjE+EQqry6KB6mDsakGOsDuEUdZiw3sGJIUWQkQArB 18 | ym5DqxKpeZBeVHBxnrEbQBV8s0j8uxd7X1E0ImfMKbKfNr/B5qPRXkREvydLWYvq 19 | 8yMcUPu1MiZFUgAGr9Py39kW3lbRPWZii/2bN8AB9h6gAhq5TiennfgJZsRiuSta 20 | w/TmOcAuz4e/KPIzfvL/YCWbLyJ2vrIQeOc4N7jZfqMmLKgYCRyjI7+amfuyKPBW 21 | J4psfJ0ssHdTxAUK65vghJ2s6FLvU3HoxzetZsJp5kj6CKYaFYkB4NkkYnlY8MP/ 22 | T68oOmdYwwwrcBmDtZwoppRb5zhev5k3aykgZ/B/vqVJE9oIPkp/7wqEP1WqSiUe 23 | AgyQBu8UN4ho2Rf6nZezZ4cjW/0WyhGOHQBFmwPI2MBGsQxF2PF4lKkJtaywIEm7 24 | 4UsEQYK7Hf2J2OccWGvfo5HZ5tsSbuOGAf0bfHfaBQBsvzWet+TO6XX9VrWjnAKl 25 | bH+mInmnd9v2oABFl9Djv/Cw+lEAxxkCTW+DcwdEFJREPab5xhQDEpQQ/Ef0ihvg 26 | /ZtJQeoOYfrLN6K726QmoRWxvqxLyWK3gztcO1svHqr/cMt3ooLJEaqU 27 | -----END CERTIFICATE----- 28 | -------------------------------------------------------------------------------- /scripts/tls/kibana/kibana.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAockRFnORugI7714s+QeJNGXkI2jwRxECnDOAu+qdnYRwcG7k 3 | obab1z5R+WVF1i0ecXNZaQT7K4s9U2a7jZ6dFVX2qeL3+y2UpRtca2hkCg/V3/19 4 | y9+CNtmPR03LxDQLoog4gAPO3f3Gejrrv/3hQl0Hyr0ndDai2DPCP0CuMl1gTOpi 5 | sELYA9W40dbhjMxFH3lyT5wzrE2zQ3R/6D0XDkWJZC2Y/fQq3oJEV+PrEbUO92vm 6 | +h+gRxnw8r3Qyiqf27qTgcBPWCP7OXJE+Sv0KQp4EJ0GtWkhZzzgqqNoNyfXqGbc 7 | cib2Fheu3Ttpkjcs6yeEnHLXYfOUPnjQRBWltwIDAQABAoIBABzXPi+VbJ4PNeYH 8 | ENdYC5KGs3PIikTw+FkmKUXWnkBtUqoIasOVTRTC7FQcRKXYpzusR30cQTZS/Ry4 9 | DKlDZFdgWreQWYqyZnREe2Y+Vs8gYY0roFhOwOTMRel9Kd0RNh3EUOYpR6B1VAOI 10 | 
K2yvco+lC/WNYNIrxz3phDBIdIVPrS947rpKP7NeUyjBZKJ7VXaB4ji+a3RVWLti 11 | JvCBFrw9ed7hkO6kascFgEiqj3UVumHlt9mhKS0GaNgUX3keb6rUmnQZQ8kei/bH 12 | dYJb+mFIcp1Go3gzYtB9Rdiyk4PgwOOXIKEj3p9kEa1La5wq+8Gx+25TEpIVZzJD 13 | dPM9FekCgYEA372BIiPYbGNchCb5ulGO5lxEhbKL+J+Epe/VL939IT2zGerLZP/v 14 | UpclV67kynM6i7HB+MTDY4WJNnCvdNakgRokno2JKnhiWmpj2TkTAtDaNoorPdW5 15 | 4hFCvfwtQyxzZ7bB1Zu+QO/mssSST1NYsL0Jhwdpu9KnQgxV1k9GXf0CgYEAuRy9 16 | +26/Q7uwFYzl2AWInihu9CM3tkoZmYxxcYHpYUPNtJlo3lU+YITi44p/jU+tpUQg 17 | szaWTxIUbHI1ddc3IaSL08N3i+WOST2YESAmVjjyDK3WR4011Ok/N5u3orK+UyTP 18 | KLU5GP9N0dd4PY4cyHam9fjQB1pyT4elQvPSpsMCgYAl+TDYjYLm6npCR6cs0zjQ 19 | qipLEsGk4dYOn8TsCd4zbRhftrTsoG+K8fI5jopwPdxa50GHqkWLN5fRMJDm4KMs 20 | 8jXy3wy/s4zl0Za+wGwUl0Ph69No/EQ/NTTBr/Iyq3Gu0ZrWWUeSF+Vsw+YdJoUw 21 | AfWDAGQWp6sRj6xl4jCMNQKBgHwQu3QM4bvvyzBmYpXJ/6qgxtxxziEqwMWcz1uY 22 | CmbaHsBwIU0CLPBboAWJCbwYLvWZF6GFcHBGWdHlKdd1CspNkjm3xCPLEvYbhhPU 23 | TF11hSTiTb9e8Tllqq0GfKlayxzGiuFptH8AfZOqqjf3S2s/ut8Yh7Sl9f8/lo+t 24 | Av4pAoGAfimSpycFmBP6JdntSoPWcM5t/Rq2tGkDO5P/T6D1N5q7cfumYC4H0OOh 25 | 9HQhC4hSnAuzYpr1UIpm7VJoN55ZslAcRMDknhT/HSkT+2akwhGkgadF6tBp7a3Q 26 | 9k2XCJ8PoaCwKvV98kZh4zsccIfxAh9lPyAvoDhoAExTmyyyisk= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /scripts/tls/elasticsearch/elasticsearch.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAq6aOA3JNw/vfxLkk4+1vu0GlcuB4Z/dggyuvagFEYpluIEUL 3 | BWXeKw9109+GVHDHadzrS4e0A4hxcGZNogaYpkpc0LK8NE7fx4Ei6foCVJ9w1Oxz 4 | XKoT2kmCyULsn176scFulFYw7sMTeOdwMgiBUYwo6rR2rsb8lPrw3MUWjAncvIxu 5 | ivY2XRecb54AZaUAAT4+my3rX4kwvbbgSzQo490ioZhQCO1RJ0TZ22EqgBaAypAu 6 | Sm+9wAKQS/ku+FNwAOiEUiynpBjo8tncmKci3udz/yAvMweeHrNmma7dCjLZgY/8 7 | PzK/jrBzOIYH2K9UndmlugWQk68wLkL7btkJOQIDAQABAoIBAEfDByZLuFVSTRgK 8 | q3CbCJZJ+KiRTeVxV6kfL2PrCbMLIC5fjx7/mqV3/jPJBHjy+Xu5v6lBH7JGbd6m 9 | 9pfhmBoVhr49q09XFllrFLSrMCQ+0EZEYGqCNHCTQRlxYEm0xKxiCR/89NItPkvS 10 | vmY8hgwYw9Z0AhZJ3LlQPDigMS/0FAnUALyZ6XT1AleQTZL8JkTY18Mt+v9BXAZW 11 | GcMZLG/N34voEwXu/8L/ZeQ84mNNvhr7xKYY2POkxUmyteFzQTt+1C93a6fUPFID 12 | iG9OK+n0LekIO7gz/qDE05v6Ox7Gi8ShnS0riQKIG3591RGo1Itn60AVwopDnxQc 13 | m1QluwECgYEA3WBd7+FFCS2H9uWguBpV9A9ZFoat2Ra7iCbeXLToox8eOALRtgg/ 14 | j+hJMx0m3f7EN3rCdslIiRzvx7e0sTUz1BSzMolxWd8rbFCjlIfHdRh7Lfov87c2 15 | d+twG2KnWZ5FuHFJHx14+GOSWYcYfXB6NLbs96po3AzTKVA8joLhk1kCgYEAxn86 16 | dVnnFmE3i3v33wwfNL9iNl2uJGFfiNfbI3qccNbqcbMtDXyxjRdWYt3bdRm62DjB 17 | LAvLmjbEZv4EKBAfCGIfGpSbxMYdhRiwE/WRD7PBAphlC8NHLTzwOjY3zJb5d3ql 18 | 3o3QG2hv8GO+ZxDRz8e2lbZHm+NUZQ0jMvUhyOECgYA62zQeYrYICqLrcYGZcOcJ 19 | TEtTQdtWrESPnkXgP/jZ5ZwTKCy5SNgdWpSbklSTEPj6BxlMm5c9XQQjBSR1OL/e 20 | 0uIRvZj1W/AfjBDxnGfbKfcV8j5tfXhtyZq0rTXxDbNbpkLakutmnpJ6WwCX2R+k 21 | hD3cWuImHwLFKR/wg7uVgQKBgHItyTCrsDO9ZDgF7UUOMf0jgZkINh2TLWMH13QH 22 | GWoHFgClMmspqPAh1n00ZQJs9H9f1i5B8rIYgs1DnJx2+YLW/lFJ3nhIBb8DvnUc 23 | sn63xXR6uNUVVSH+hajWey5iQcmqcTMdm/HIF2vN1D5w/F9q3+oqWfKfx3IBJpgv 24 | TWnBAoGBANm7mXlT/+xLzLW/+TGcTDmxrTGr2eIOjpgDxdy6q1i4ZpJDCybaulRj 25 | hJ12kE8bXPxlGP5ZGdcOn3Z76AYGSWLTLIa2rlg/lGreBZnKtXsN42PknQekGAV5 26 | JfZ7LEP1QhLBmy7uMO7V75NxPYqCqien/HQxGKlbHY8XB7Y24PWO 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | attrs==19.2.0 2 | backoff==1.10.0 3 | backports-abc==0.5 4 | bcrypt==3.2.0 5 | cached-property==1.5.1 6 | certifi==2017.11.5 7 | cffi==1.15.0 
8 | chardet==3.0.4 9 | charset-normalizer==2.0.11 10 | coverage==4.5.1 11 | cryptography==36.0.1 12 | Deprecated==1.2.13 13 | distro==1.6.0 14 | docker==4.3.1 15 | docker-compose==1.27.4 16 | dockerpty==0.4.1 17 | docopt==0.6.2 18 | elasticsearch==7.15.2 19 | flake8==3.9.0 20 | funcsigs==1.0.2 21 | future==0.16.0 22 | googleapis-common-protos==1.54.0 23 | grpcio==1.43.0 24 | idna==2.6 25 | iniconfig==2.0.0 26 | jsonschema==2.6.0 27 | mccabe==0.6.1 28 | more-itertools==8.12.0 29 | opentelemetry-api==1.11.0 30 | opentelemetry-exporter-otlp==1.11.0 31 | opentelemetry-exporter-otlp-proto-grpc==1.11.0 32 | opentelemetry-exporter-otlp-proto-http==1.11.0 33 | opentelemetry-instrumentation==0.24b0 34 | opentelemetry-proto==1.11.0 35 | opentelemetry-sdk==1.11.0 36 | opentelemetry-semantic-conventions==0.30b0 37 | packaging==21.3 38 | paramiko==2.9.2 39 | pluggy==0.13.1 40 | protobuf==3.19.4 41 | py==1.10.0 42 | pycodestyle==2.7.0 43 | pycparser==2.21 44 | pyflakes==2.3.0 45 | PyNaCl==1.5.0 46 | pyparsing==3.0.7 47 | pytest==6.2.5 48 | pytest-base-url==1.4.2 49 | pytest-html==3.2.0 50 | pytest-metadata==2.0.4 51 | pytest-otel==1.1.1 52 | pytest-random-order==1.1.0 53 | pytest-rerunfailures==11.0 54 | pytest-selenium==3.0.0 55 | pytest-variables==1.9.0 56 | python-dotenv==0.21.0 57 | PyYAML==5.3.1 58 | requests==2.26.0 59 | selenium==3.8.0 60 | singledispatch==3.4.0.3 61 | six==1.11.0 62 | tenacity==6.3.1 63 | texttable==0.9.1 64 | timeout-decorator==0.4.0 65 | toml==0.10.2 66 | tornado==5.1 67 | typing_extensions==4.1.1 68 | urllib3==1.26.8 69 | virtualenv==16.7.9 70 | waiting==1.4.1 71 | wcwidth==0.2.5 72 | webium==1.2.1 73 | websocket-client==0.54.0 74 | wrapt==1.13.3 75 | -------------------------------------------------------------------------------- /docker/elastic-agent/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG STACK_VERSION=8.2.0-SNAPSHOT 2 | FROM docker.elastic.co/beats-dev/golang-crossbuild:1.17.8-main-debian10 as build 3 | ARG ELASTIC_AGENT_BRANCH_OR_COMMIT="main" 4 | ARG ELASTIC_AGENT_REPO=https://github.com/elastic/apm-server.git 5 | ARG STACK_VERSION=8.2.0-SNAPSHOT 6 | 7 | ENV SRC=/go/src/github.com/elastic/apm-server 8 | ENV GOOS=linux 9 | 10 | RUN git clone ${ELASTIC_AGENT_REPO} ${SRC} \ 11 | && cd ${SRC} \ 12 | && git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' \ 13 | && git checkout ${ELASTIC_AGENT_BRANCH_OR_COMMIT} \ 14 | && git rev-parse HEAD \ 15 | && echo ${ELASTIC_AGENT_BRANCH_OR_COMMIT} 16 | 17 | RUN cd ${SRC} \ 18 | && go install github.com/magefile/mage@v1.12.1 \ 19 | && version=$(mage version) \ 20 | && apmdir=apm-server-${version}-linux-x86_64 \ 21 | && builddir=build/distributions/${apmdir} \ 22 | && mkdir -p ${builddir} \ 23 | && cp -f LICENSE.txt NOTICE.txt README.md apm-server.yml ${builddir} \ 24 | && go build -o ${builddir}/apm-server ./x-pack/apm-server \ 25 | && cd build/distributions \ 26 | && tar -czf /apm-server.tgz ${apmdir} 27 | 28 | ARG STACK_VERSION=8.2.0-SNAPSHOT 29 | FROM docker.elastic.co/beats/elastic-agent:${STACK_VERSION} 30 | 31 | USER root 32 | COPY --from=build /apm-server.tgz /tmp 33 | RUN cat /usr/share/elastic-agent/.build_hash.txt|cut -b 1-6 > /sha.txt 34 | #RUN rm /usr/share/elastic-agent/data/elastic-agent-$(cat /sha.txt)/downloads/apm-server* 35 | RUN dst=/usr/share/elastic-agent/data/elastic-agent-$(cat /sha.txt)/install \ 36 | && mkdir -p ${dst} \ 37 | && tar -xzf /tmp/apm-server.tgz -C ${dst} \ 38 | && rm /tmp/apm-server.tgz \ 39 | && chown -R 
elastic-agent:elastic-agent ${dst} 40 | USER elastic-agent 41 | # Add healthcheck for docker/healthcheck metricset to check during testing 42 | HEALTHCHECK CMD exit 0 43 | -------------------------------------------------------------------------------- /docker/opbeans/dotnet/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | # Stage 0: clone opbeans-dotnet and build the project 3 | # DOTNET_AGENT_VERSION parameterises the .NET agent version to be used; 4 | # if unset, the build generated above is used. (TODO: not implemented yet) 5 | # DOTNET_AGENT_REPO and DOTNET_AGENT_BRANCH parameterise the .NET agent 6 | # repo and branch (or commit) to use. 7 | FROM mcr.microsoft.com/dotnet/sdk:6.0 AS opbeans-dotnet 8 | ENV DOTNET_ROOT=/usr/share/dotnet 9 | ARG DOTNET_AGENT_REPO=elastic/apm-agent-dotnet 10 | ARG DOTNET_AGENT_BRANCH=main 11 | ARG DOTNET_AGENT_VERSION= 12 | ARG OPBEANS_DOTNET_REPO=elastic/opbeans-dotnet 13 | ARG OPBEANS_DOTNET_BRANCH=main 14 | # Workaround for https://github.com/dotnet/sdk/issues/14497 15 | ARG DOTNET_HOST_PATH=/usr/share/dotnet/dotnet 16 | WORKDIR /src 17 | COPY . /src 18 | # install the SDK version specified in the global.json of elastic/apm-agent-dotnet 19 | # Needed when building branches that specify the 3.1.100 SDK in global.json 20 | RUN curl --retry 5 -sSL https://dot.net/v1/dotnet-install.sh | bash /dev/stdin --install-dir ${DOTNET_ROOT} -version 3.1.100 21 | # SDK 5.x is also needed 22 | RUN curl --retry 5 -sSL https://dot.net/v1/dotnet-install.sh | bash /dev/stdin --install-dir ${DOTNET_ROOT} -version 5.0.100 23 | # SDK 7.x is also needed 24 | RUN curl --retry 5 -sSL https://dot.net/v1/dotnet-install.sh | bash /dev/stdin --install-dir ${DOTNET_ROOT} -version 7.0.100 25 | RUN ./run.sh 26 | 27 | # Stage 1: run the opbeans-dotnet app 28 | FROM mcr.microsoft.com/dotnet/core/aspnet:3.1.7-alpine AS runtime 29 | WORKDIR /app 30 | COPY --from=opbeans-dotnet /src/opbeans-dotnet/opbeans-dotnet/build ./ 31 | COPY --from=opbeans/opbeans-frontend:latest /app/build /opbeans-frontend 32 | RUN apk update \ 33 | && apk add --no-cache curl \ 34 | && rm -rf /var/cache/apk/* 35 | EXPOSE 3000 36 | ENTRYPOINT ["dotnet", "opbeans-dotnet.dll", "--urls", "http://*:3000"] 37 | -------------------------------------------------------------------------------- /docker/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help 2 | .DEFAULT_GOAL := help 3 | BATS_VERSION = "v1.1.0" 4 | LTS_ALPINE ?= 12-alpine 5 | 6 | help: ## Display this help text 7 | @grep -E '^[a-zA-Z_-]+[%]?:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' 8 | 9 | bats: ## Install bats in the project itself 10 | @test -e "bats-core" && (cd bats-core ; git checkout ${BATS_VERSION}) || git clone https://github.com/bats-core/bats-core.git --depth=1 --branch ${BATS_VERSION} 11 | 12 | prepare-test: bats ## Prepare the bats dependencies 13 | @docker pull node:${LTS_ALPINE} 14 | @mkdir -p target 15 | @git submodule sync 16 | @git submodule update --init --recursive 17 | 18 | test-%: prepare-test ## Run the tests for the specific app 19 | @DOCKERFILE=$* bats-core/bin/bats --tap tests | tee target/results.tap 20 | @APP=$*; docker run --rm -e APP=$${APP} -v "$(CURDIR)":/usr/src/app -w /usr/src/app node:${LTS_ALPINE} \ 21 | sh -c 'npm install tap-xunit -g && cat target/results.tap | tap-xunit --package="co.elastic.apm.integration.testing.$${APP}" > target/junit-$${APP}-results.xml' 22 |
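# Example (app name is illustrative): `make test-opbeans-node` builds the opbeans/node
# image, runs the bats suite against it, and converts the TAP output into
# target/junit-opbeans-node-results.xml.
23 |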
push-%: prepare-test ## Push the Docker image to the docker.elastic.co repository 24 | docker push "docker.elastic.co/observability-ci/it_$*" 25 | 26 | all-opbeans-tests: test-opbeans-dotnet test-opbeans-frontend_nginx test-opbeans-go test-opbeans-java test-opbeans-node test-opbeans-python test-opbeans-ruby test-opbeans-rum ## Run the tests for all the opbeans 27 | 28 | all-tests: test-apm-server all-opbeans-tests ## Run the tests for all the apps 29 | 30 | all-push: push-apm-server push-opbeans-dotnet push-opbeans-frontend_nginx push-opbeans-go push-opbeans-java push-opbeans-node push-opbeans-python push-opbeans-ruby push-opbeans-rum ## Push Docker images to the registry. 31 | 32 | clean: ## Clean autogenerated files/folders 33 | @rm -rf bats-core 34 | @rm -rf target 35 | -------------------------------------------------------------------------------- /.ci/bump-stack-release-version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # The script checks the content of the file "scripts/modules/cli.py" 4 | # - if it differs from $1 and DRY_RUN is set to: 5 | # - "false" then it updates it with the value of $1 6 | # - "true" then it only reports the value of $1 7 | # - otherwise it exits without any value reported 8 | # 9 | # Parameters: 10 | # $1 -> the release version to be bumped. Mandatory. 11 | # 12 | 13 | CLI_FILE=scripts/modules/cli.py 14 | RELEASE_VERSION=${1:?"The release version is mandatory"} 15 | MINOR_MAJOR_RELEASE_VERSION=${RELEASE_VERSION%.*} 16 | 17 | OS=$(uname -s| tr '[:upper:]' '[:lower:]') 18 | 19 | if [ "${OS}" == "darwin" ] ; then 20 | SED="sed -i .bck" 21 | else 22 | SED="sed -i" 23 | fi 24 | 25 | if grep -q "'${RELEASE_VERSION}'" ${CLI_FILE} ; then 26 | ## No change 27 | # early return with no output 28 | exit 0 29 | else 30 | if test "$DRY_RUN" == "false" ; then 31 | ## Value changed to $1 - NO dry run 32 | # do something such as writing a file here 33 | 34 | if grep -q "'${MINOR_MAJOR_RELEASE_VERSION}':" ${CLI_FILE} ; then 35 | ## Update new major.minor.patch 36 | ${SED} -E -e "s#('${MINOR_MAJOR_RELEASE_VERSION}'): '[0-9]+\.[0-9]+\.[0-9]+'#\1: '${RELEASE_VERSION}'#g" ${CLI_FILE} 37 | else 38 | ## Add new major.minor 39 | ${SED} -E -e "s&(# UPDATECLI_AUTOMATION.*)&'${MINOR_MAJOR_RELEASE_VERSION}': '${RELEASE_VERSION}',\n \1&g" ${CLI_FILE} 40 | fi 41 | fi 42 | 43 | # Report on stdout 44 | if grep -q "'${MINOR_MAJOR_RELEASE_VERSION}':" ${CLI_FILE} ; then 45 | ## Update new major.minor.patch 46 | sed -E -e "s#('${MINOR_MAJOR_RELEASE_VERSION}'): '[0-9]+\.[0-9]+\.[0-9]+'#\1: '${RELEASE_VERSION}'#g" ${CLI_FILE} 47 | else 48 | ## Add new major.minor 49 | sed -E -e "s&(# UPDATECLI_AUTOMATION.*)&'${MINOR_MAJOR_RELEASE_VERSION}': '${RELEASE_VERSION}',\n \1&g" ${CLI_FILE} 50 | fi 51 | exit 0 52 | fi 53 | -------------------------------------------------------------------------------- /.ci/packer_cache.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # shellcheck disable=SC1091 4 | source /usr/local/bin/bash_standard_lib.sh 5 | 6 | DOCKER_IMAGES="alpine:3.4 7 | alpine:latest 8 | busybox:latest 9 | docker.elastic.co/beats-dev/fpm:1.11.0 10 | docker.elastic.co/observability-ci/apm-integration-testing:daily 11 | docker.elastic.co/observability-ci/golang-mage:1.12.4 12 | docker.elastic.co/observability-ci/jruby:9.1-7-jdk 13 | docker.elastic.co/observability-ci/jruby:9.2-11-jdk 14 | docker.elastic.co/observability-ci/jruby:9.2-12-jdk 15 |
docker.elastic.co/observability-ci/jruby:9.2-8-jdk 16 | golang:1.8 17 | golang:1.9 18 | golang:1.10 19 | golang:1.11 20 | golang:1.12 21 | golang:1.12.4 22 | golang:latest 23 | haproxy:1.9 24 | jruby:9.1 25 | jruby:9.2 26 | maven:3.5.3-jdk-10 27 | mcr.microsoft.com/dotnet/core/aspnet:2.2-alpine 28 | mcr.microsoft.com/dotnet/core/sdk:2.2 29 | node:10 30 | node:10.0 31 | node:12 32 | node:12.0 33 | node:13 34 | node:13.0 35 | node:14 36 | node:14.0 37 | node:8-slim 38 | node:8 39 | node:8.6 40 | opbeans/opbeans-frontend:latest 41 | opbeans/opbeans-go:latest 42 | opbeans/opbeans-java:latest 43 | opbeans/opbeans-loadgen:latest 44 | opbeans/opbeans-node:latest 45 | opbeans/opbeans-python:latest 46 | opbeans/opbeans-ruby:latest 47 | adoptopenjdk:11-jre-hotspot 48 | php:7-alpine 49 | php:7.2-alpine 50 | php:7.3-alpine 51 | pypy:2 52 | pypy:3 53 | python:2.7 54 | python:3 55 | python:3.4 56 | python:3.5 57 | python:3.6 58 | python:3.7 59 | python:3.7-stretch 60 | python:3.8 61 | ruby:2.3 62 | ruby:2.4 63 | ruby:2.5 64 | ruby:2.6 65 | ruby:latest 66 | wordpress:php7.3-fpm-alpine 67 | " 68 | 69 | if [ -x "$(command -v docker)" ]; then 70 | for di in ${DOCKER_IMAGES} 71 | do 72 | (retry 2 docker pull "${di}") || echo "Error pulling ${di} Docker image, we continue" 73 | done 74 | (retry 2 .ci/scripts/build-docker-all.sh) || echo "Error building images for the apm-it, we continue" 75 | fi 76 | -------------------------------------------------------------------------------- /docker/opbeans/go/Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 0: clone opbeans-go and apm-agent-go, and build. 2 | # 3 | # GO_AGENT_REPO and GO_AGENT_BRANCH parameterise the Go agent 4 | # repo and branch (or commit) to use. 5 | FROM golang:stretch 6 | ENV GO111MODULE=on 7 | ARG OPBEANS_GO_REPO=elastic/opbeans-go 8 | ARG OPBEANS_GO_BRANCH=main 9 | WORKDIR /src/opbeans-go 10 | 11 | RUN git clone https://github.com/${OPBEANS_GO_REPO}.git . \ 12 | && git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' \ 13 | && git checkout ${OPBEANS_GO_BRANCH} 14 | RUN rm -fr vendor/go.elastic.co/apm 15 | 16 | ARG GO_AGENT_REPO=elastic/apm-agent-go 17 | ARG GO_AGENT_BRANCH=main 18 | 19 | RUN git clone https://github.com/${GO_AGENT_REPO}.git /src/go.elastic.co/apm 20 | RUN (cd /src/go.elastic.co/apm \ 21 | && git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' \ 22 | && git checkout ${GO_AGENT_BRANCH}) 23 | 24 | # Don't complain if there are missing dependencies in go.mod, 25 | # as we may be building against an unreleased branch. 26 | ENV GOFLAGS=-mod=mod 27 | 28 | # Add "replace" stanzas to go.mod to use the local agent repo 29 | RUN go list -m all | grep apm | cut -d' ' -f1 | sed -e 's_\(.*\)/v[0-9]\+$_\1_' | xargs -i go mod edit -replace {}=/src/{} 30 | RUN go build 31 | 32 | # Stage 1: copy static assets from opbeans/opbeans-frontend and 33 | # opbeans-go from stage 0 into a minimal image. 
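# Note: distroless images ship no shell, so the HEALTHCHECK and CMD below must use
# the exec (JSON array) form rather than the shell form.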
34 | FROM gcr.io/distroless/base 35 | ENV ELASTIC_APM_ENABLE_LOG_CORRELATION=true 36 | ENV ELASTIC_APM_LOG_LEVEL=DEBUG 37 | 38 | COPY --from=opbeans/opbeans-frontend:latest /app/build /opbeans-frontend 39 | COPY --from=0 /src/opbeans-go/opbeans-go / 40 | COPY --from=0 /src/opbeans-go/db / 41 | EXPOSE 3000 42 | 43 | HEALTHCHECK \ 44 | --interval=10s --retries=10 --timeout=3s \ 45 | CMD ["/opbeans-go", "-healthcheck", "localhost:3000"] 46 | 47 | CMD ["/opbeans-go", "-log-json", "-log-level=debug", "-listen=:3000", "-frontend=/opbeans-frontend", "-db=postgres:", "-cache=redis://redis:6379"] 48 | -------------------------------------------------------------------------------- /docker/apm-server/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG apm_server_base_image=docker.elastic.co/apm/apm-server:8.9.0-SNAPSHOT 2 | ARG go_version=1.20.7 3 | ARG apm_server_binary=apm-server 4 | 5 | ############################################################################### 6 | # Build stage: build apm-server binary and update apm-server.yml 7 | ############################################################################### 8 | 9 | FROM golang:${go_version} AS build 10 | ARG apm_server_binary 11 | 12 | # install the prerequisites for `make update` 13 | RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries 14 | RUN apt-get -qq update \ 15 | && apt-get -qq install -y python3 python3-pip python3-venv rsync 16 | 17 | ARG apm_server_branch_or_commit=main 18 | ARG apm_server_repo=https://github.com/elastic/apm-server.git 19 | ENV SRC=/go/src/github.com/elastic/apm-server 20 | 21 | # Clone the repository and check out the given branch or commit. 22 | RUN git clone ${apm_server_repo} ${SRC} \ 23 | && cd ${SRC} && git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' \ 24 | && git checkout ${apm_server_branch_or_commit} 25 | 26 | RUN cd ${SRC} && git rev-parse HEAD && echo ${apm_server_branch_or_commit} 27 | 28 | RUN make -C ${SRC} update ${apm_server_binary} \ 29 | && sed -zri -e 's/output.elasticsearch:(\n[^\n]*){5}/output.elasticsearch:\n hosts: ["\${ELASTICSEARCH_HOSTS:elasticsearch:9200}"]/' -e 's/ host: "localhost:8200"/ host: "0.0.0.0:8200"/' ${SRC}/apm-server.yml \ 30 | && chmod go+r ${SRC}/apm-server.yml 31 | 32 | ############################################################################### 33 | # Image update stage: layer apm-server binary and apm-server.yml on top of the 34 | # base image.
35 | ############################################################################### 36 | 37 | FROM ${apm_server_base_image} 38 | ARG apm_server_binary 39 | ENV SRC=/go/src/github.com/elastic/apm-server 40 | COPY --from=build ${SRC}/${apm_server_binary} /usr/share/apm-server/apm-server 41 | COPY --from=build ${SRC}/apm-server.yml /usr/share/apm-server/apm-server.yml 42 | 43 | CMD ./apm-server -e -d "*" 44 | 45 | # Add healthcheck for docker/healthcheck metricset to check during testing 46 | HEALTHCHECK CMD exit 0 47 | -------------------------------------------------------------------------------- /docker/opbeans/rum/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-bullseye-slim 2 | 3 | ENV DEBIAN_FRONTEND noninteractive 4 | # Install latest chrome dev package and fonts to support major charsets (Chinese, Japanese, Arabic, Hebrew, Thai and a few others) 5 | # Note: this installs the necessary libs to make the bundled version of Chromium that Puppeteer 6 | # installs, work 7 | RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries 8 | RUN apt update -qq \ 9 | && apt install -qq -y --no-install-recommends \ 10 | curl \ 11 | git \ 12 | gnupg \ 13 | libgconf-2-4 \ 14 | libxss1 \ 15 | libxtst6 \ 16 | python \ 17 | g++ \ 18 | build-essential \ 19 | chromium \ 20 | chromium-sandbox \ 21 | dumb-init \ 22 | fonts-ipafont-gothic fonts-wqy-zenhei fonts-thai-tlwg fonts-kacst \ 23 | && rm -rf /var/lib/apt/lists/* \ 24 | && rm -rf /src/*.deb 25 | 26 | # Uncomment to skip the chromium download when installing puppeteer. If you do, 27 | # you'll need to launch puppeteer with: 28 | # browser.launch({executablePath: 'google-chrome-unstable'}) 29 | ENV PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=true 30 | ENV CHROME_PATH=/usr/bin/chromium 31 | ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium 32 | 33 | WORKDIR /home/pptruser 34 | 35 | # Add user to not run as a root. 36 | RUN groupadd -r pptruser && useradd -r -g pptruser -G audio,video pptruser \ 37 | && mkdir -p /home/pptruser/Downloads \ 38 | && chown -R pptruser:pptruser /home/pptruser 39 | 40 | COPY package*.json /home/pptruser/ 41 | COPY tasks.js /home/pptruser/ 42 | COPY processes.config.js /home/pptruser/ 43 | RUN chown -R pptruser:pptruser /home/pptruser; 44 | 45 | # Run everything after as non-privileged user. 
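# (Chromium refuses to start as root unless --no-sandbox is passed, so a dedicated
# unprivileged account is the safer default.)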
46 | USER pptruser 47 | 48 | # retry the npm install up to three times, pausing 10 seconds between attempts 49 | RUN for i in 1 2 3; \ 50 | do \ 51 | npm install --no-optional && break; \ 52 | sleep 10; \ 53 | if [ $i -eq 3 ]; then exit 1; fi; \ 54 | done; 55 | 56 | # If running Docker >= 1.13.0 use docker run's --init arg to reap zombie processes; otherwise 57 | # the following lines make `dumb-init` PID 1 58 | ENTRYPOINT ["dumb-init", "--"] 59 | CMD ["node_modules/.bin/pm2-docker", "processes.config.js"] 60 | -------------------------------------------------------------------------------- /docker/opbeans/frontend_nginx/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "******************************************************************************************************************************************" 3 | echo "* You must define ELASTIC_OPBEANS_API_SERVER to redirect all requests to http://host:port/api/* (default http://localhost:3000)          *" 4 | echo "* You can define ELASTIC_APM_JS_BASE_SERVER_URL to send APM requests (default http://localhost:8200)                                    *" 5 | echo "* You can define ELASTIC_APM_JS_BASE_SERVICE_VERSION to set the service version (default: a random number)                              *" 6 | echo "* You can define ELASTIC_APM_JS_BASE_SERVICE_NAME to set the service name (default opbeans-rum)                                         *" 7 | echo "* You can define EXTRA_OPTS to set any additional option you need (default empty) EXTRA_OPTS='setting1: value1, setting2: value2'       *" 8 | echo "******************************************************************************************************************************************" 9 | 10 | ELASTIC_OPBEANS_API_SERVER=${ELASTIC_OPBEANS_API_SERVER:-"http://localhost:3000"} 11 | ELASTIC_APM_JS_BASE_SERVER_URL=${ELASTIC_APM_JS_BASE_SERVER_URL:-"http://localhost:8200"} 12 | ELASTIC_APM_JS_BASE_SERVICE_NAME=${ELASTIC_APM_JS_BASE_SERVICE_NAME:-"opbeans-rum"} 13 | ELASTIC_APM_JS_BASE_SERVICE_VERSION=${ELASTIC_APM_JS_BASE_SERVICE_VERSION:-"$RANDOM"} 14 | 15 | echo "ELASTIC_OPBEANS_API_SERVER=${ELASTIC_OPBEANS_API_SERVER}" 16 | echo "ELASTIC_APM_JS_BASE_SERVER_URL=${ELASTIC_APM_JS_BASE_SERVER_URL}" 17 | echo "ELASTIC_APM_JS_BASE_SERVICE_VERSION=${ELASTIC_APM_JS_BASE_SERVICE_VERSION}" 18 | echo "ELASTIC_APM_JS_BASE_SERVICE_NAME=${ELASTIC_APM_JS_BASE_SERVICE_NAME}" 19 | 20 | sed "s@{{ ELASTIC_OPBEANS_API_SERVER }}@${ELASTIC_OPBEANS_API_SERVER}@g" /etc/nginx/conf.d/default.template > /etc/nginx/conf.d/default.conf 21 | sed -e "s@{{ ELASTIC_APM_JS_BASE_SERVER_URL }}@${ELASTIC_APM_JS_BASE_SERVER_URL}@g" \ 22 | -e "s@{{ ELASTIC_APM_JS_BASE_SERVICE_VERSION }}@${ELASTIC_APM_JS_BASE_SERVICE_VERSION}@g" \ 23 | -e "s@{{ ELASTIC_APM_JS_BASE_SERVICE_NAME }}@${ELASTIC_APM_JS_BASE_SERVICE_NAME}@g" \ 24 | -e "s@{{ EXTRA_OPTS }}@${EXTRA_OPTS}@g" \ 25 | /usr/share/nginx/html/rum-config.template > /usr/share/nginx/html/rum-config.js 26 | 27 | exec nginx-debug -g 'daemon off;' 28 | -------------------------------------------------------------------------------- /docker/kibana/kibana-8.yml: -------------------------------------------------------------------------------- 1 | --- 2 | xpack.fleet.packages: 3 | - name: system 4 | version: latest 5 | - name: elastic_agent 6 | version: latest 7 | - name: apm 8 | version: latest 9 | - name: fleet_server 10 | version: latest 11 | 12 | xpack.fleet.agentPolicies: 13 | - name: Fleet Server + APM policy 14 | id: fleet-server-apm-policy 15 | description: Fleet server policy with APM and System logs and metrics enabled 16 | namespace:
default 17 | is_default_fleet_server: true 18 | is_managed: false 19 | monitoring_enabled: 20 | - logs 21 | - metrics 22 | package_policies: 23 | - name: system-1 24 | package: 25 | name: system 26 | - name: apm-1 27 | package: 28 | name: apm 29 | inputs: 30 | - type: apm 31 | keep_enabled: true 32 | vars: 33 | - name: host 34 | value: 0.0.0.0:8200 35 | frozen: true 36 | - name: url 37 | value: "${ELASTIC_APM_SERVER_URL}" 38 | frozen: true 39 | - name: enable_rum 40 | value: true 41 | frozen: true 42 | - name: read_timeout 43 | value: 1m 44 | frozen: true 45 | - name: shutdown_timeout 46 | value: 2m 47 | frozen: true 48 | - name: write_timeout 49 | value: 1m 50 | frozen: true 51 | - name: rum_allow_headers 52 | value: 53 | - x-custom-header 54 | frozen: true 55 | - name: secret_token 56 | value: "${ELASTIC_APM_SECRET_TOKEN}" 57 | frozen: true 58 | - name: tls_enabled 59 | value: ${ELASTIC_APM_TLS} 60 | frozen: true 61 | - name: tls_certificate 62 | value: /usr/share/apm-server/config/certs/tls.crt 63 | frozen: true 64 | - name: tls_key 65 | value: /usr/share/apm-server/config/certs/tls.key 66 | frozen: true 67 | - name: Fleet Server 68 | package: 69 | name: fleet_server 70 | inputs: 71 | - type: fleet-server 72 | keep_enabled: true 73 | vars: 74 | - name: host 75 | value: 0.0.0.0 76 | frozen: true 77 | - name: port 78 | value: 8220 79 | frozen: true 80 | -------------------------------------------------------------------------------- /docker/opbeans/rum/tasks.js: -------------------------------------------------------------------------------- 1 | const puppeteer = require('puppeteer') 2 | 3 | const baseUrl = process.env.OPBEANS_BASE_URL || 'http://www.opbeans.com'; 4 | let url = baseUrl 5 | 6 | function sleep(ms) { 7 | return new Promise(resolve => { 8 | setTimeout(resolve, ms) 9 | }) 10 | } 11 | 12 | async function run() { 13 | const browser = await puppeteer.launch({ 14 | pipe: true, 15 | headless: true, 16 | dumpio: true, 17 | // waitForInitialPage: false, 18 | // devtools: false, 19 | // ignoreDefaultArgs: false, 20 | args: [ 21 | '--disable-dev-shm-usage', 22 | // debug port is exported 23 | //'--remote-debugging-address=0.0.0.0', 24 | //'--remote-debugging-port=9222', 25 | // chromium needs to use --no-sandbox on linux/arm64 26 | // '--disable-gpu-sandbox', 27 | // '--disable-namespace-sandbox', 28 | '--disable-setuid-sandbox', 29 | '--no-sandbox', 30 | '--no-zygote', 31 | '--disable-gpu', 32 | '--disable-audio-output', 33 | // '--disable-gpu-rasterization', 34 | //'--no-first-run', 35 | //'--safe-mode', 36 | // the pipe sometimes breaks; running with a single process avoids that 37 | '--headless', 38 | '--single-process' 39 | ] // see https://github.com/puppeteer/puppeteer/blob/master/docs/troubleshooting.md#tips 40 | // https://peter.sh/experiments/chromium-command-line-switches/ 41 | }) 42 | // debugger; 43 | const page = await browser.newPage() 44 | page.on('console', msg => console.log( 45 | 'PAGE LOG:', msg.type() + "\t" + msg.text() + "\t" + msg.location().url + " (" + msg.location().lineNumber + ":" + msg.location().columnNumber + ")") 46 | ) 47 | for (; ;) { 48 | try { 49 | await page.goto(url) 50 | console.log('Checking URL:' + url); 51 | url = await page.evaluate(defaultUrl => { 52 | // activateLoadGeneration is defined in opbeans-frontend 53 | if (typeof window.activateLoadGeneration === 'function') { 54 | console.log('Activating route change load generation') 55 | window.activateLoadGeneration() 56 | } 57 | const links = document.querySelectorAll('a[href^="/"]') 58 | if
(links && links.length) { 59 | const i = Math.floor(Math.random() * links.length) 60 | return links[i].href 61 | } else { 62 | return defaultUrl 63 | } 64 | }, 65 | baseUrl 66 | ) 67 | } catch (e) { 68 | // this will catch the error, log it and let the process continue 69 | console.error(`Error occurred while evaluating ${url}:`, e) 70 | } 71 | console.log(url) 72 | await sleep(8000 + Math.floor(Math.random() * 10000)) 73 | } 74 | } 75 | 76 | run().catch(console.error.bind(console)) 77 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | queue_rules: 2 | - name: auto-merge 3 | conditions: 4 | - check-success=apm-ci/pr-merge 5 | - label=automation 6 | 7 | 8 | pull_request_rules: 9 | - name: ask to resolve conflict 10 | conditions: 11 | - -merged 12 | - -closed 13 | - conflict 14 | - -author=apmmachine 15 | actions: 16 | comment: 17 | message: | 18 | This pull request is now in conflicts. Could you fix it @{{author}}? 19 | To fixup this pull request, you can check out it locally. See documentation: https://help.github.com/articles/checking-out-pull-requests-locally/ 20 | ``` 21 | git fetch upstream 22 | git checkout -b {{head}} upstream/{{head}} 23 | git merge upstream/{{base}} 24 | git push upstream {{head}} 25 | ``` 26 | - name: automate backport creation to 7.x branch 27 | conditions: 28 | - merged 29 | - base=main 30 | - label=backport 31 | actions: 32 | backport: 33 | assignees: 34 | - "{{ author }}" 35 | branches: 36 | - "7.x" 37 | labels: 38 | - backport 39 | - name: automatic merge backported Pull Requests from mergify when CI passes. 40 | conditions: 41 | - check-success=apm-ci/pr-merge 42 | - author=mergify[bot] 43 | - label=backport 44 | - base=7.x 45 | actions: 46 | queue: 47 | name: auto-merge 48 | method: squash 49 | - name: automatic merge when CI passes and the file tests/versions/apm_server.yml is modified. 50 | conditions: 51 | - files~=^tests/versions/apm_server.yml$ 52 | - check-success=apm-ci/pr-merge 53 | actions: 54 | queue: 55 | name: auto-merge 56 | method: squash 57 | - name: automatic merge when CI passes and the file scripts/modules/cli.py is modified. 58 | conditions: 59 | - files~=^scripts/modules/cli.py$ 60 | - check-success=apm-ci/pr-merge 61 | actions: 62 | queue: 63 | name: auto-merge 64 | method: squash 65 | - name: delete upstream branch after merging changes on scripts/modules/cli.py or it's closed 66 | conditions: 67 | - or: 68 | - merged 69 | - closed 70 | - and: 71 | - label=automation 72 | - head~=^update.* 73 | actions: 74 | delete_head_branch: 75 | - name: notify the backport has not been merged yet 76 | conditions: 77 | - -merged 78 | - -closed 79 | - author=mergify[bot] 80 | - "#check-success>0" 81 | - schedule=Mon-Mon 06:00-10:00[Europe/Paris] 82 | actions: 83 | comment: 84 | message: | 85 | This pull request has not been merged yet. Could you please review and merge it @{{ assignee | join(', @') }}? 
🙏 86 | -------------------------------------------------------------------------------- /docker/metricbeat/metricbeat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | setup.template.settings: 3 | index.number_of_shards: 1 4 | index.codec: best_compression 5 | index.number_of_replicas: 0 6 | 7 | setup.kibana: 8 | host: "${KIBANA_HOST:kibana:5601}" 9 | 10 | output.elasticsearch: 11 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 12 | username: '${ELASTICSEARCH_USERNAME:}' 13 | password: '${ELASTICSEARCH_PASSWORD:}' 14 | 15 | logging.json: true 16 | logging.metrics.enabled: false 17 | 18 | monitoring.enabled: true 19 | 20 | processors: 21 | - add_host_metadata: ~ 22 | - add_cloud_metadata: ~ 23 | - add_docker_metadata: ~ 24 | - add_kubernetes_metadata: ~ 25 | ################################################################################################### 26 | ## modules 27 | ################################################################################################### 28 | metricbeat.config.modules: 29 | path: /dev/null 30 | reload.enabled: false 31 | 32 | metricbeat.modules: 33 | - module: golang 34 | metricsets: ["expvar", "heap"] 35 | period: 10s 36 | hosts: ["${APM_SERVER_PPROF_HOST:apm-server:6060}"] 37 | heap.path: "/debug/vars" 38 | expvar: 39 | namespace: "apm-server" 40 | path: "/debug/vars" 41 | - module: docker 42 | metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] 43 | hosts: ["unix:///var/run/docker.sock"] 44 | period: 10s 45 | 46 | metricbeat.autodiscover: 47 | providers: 48 | - type: docker 49 | hints.enabled: true 50 | templates: 51 | - condition: 52 | contains: 53 | docker.container.image: "redis" 54 | config: 55 | - module: redis 56 | metricsets: ["info", "keyspace"] 57 | hosts: "${data.host}:6379" 58 | - condition: 59 | contains: 60 | docker.container.image: "postgres" 61 | config: 62 | - module: postgresql 63 | metricsets: ["database", "bgwriter", "activity"] 64 | hosts: ["postgres://${data.host}:5432?sslmode=disable"] 65 | password: verysecure 66 | username: postgres 67 | - condition: 68 | contains: 69 | docker.container.image: "kafka" 70 | config: 71 | - module: kafka 72 | metricsets: ["consumergroup", "partition"] 73 | period: 10s 74 | hosts: "${data.host}:9092" 75 | - condition: 76 | contains: 77 | docker.container.image: "logstash" 78 | config: 79 | - module: logstash 80 | metricsets: ["node", "node_stats"] 81 | period: 10s 82 | hosts: "${data.host}:9600" 83 | 84 | http.enabled: true 85 | http.host: localhost 86 | http.port: 5066 87 | -------------------------------------------------------------------------------- /.ci/bump-elastic-stack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bump elastic stack to latest version 3 | 4 | actions: 5 | default: 6 | title: '[updatecli] Bump elastic stack version to {{ source "latestRelease" }}/{{ source "major-minor-patch" }}' 7 | kind: github/pullrequest 8 | spec: 9 | labels: 10 | - automation 11 | - dependencies 12 | - backport-skip 13 | scmid: default 14 | 15 | scms: 16 | default: 17 | kind: github 18 | spec: 19 | user: '{{ requiredEnv "GIT_USER" }}' 20 | email: '{{ requiredEnv "GIT_EMAIL" }}' 21 | owner: elastic 22 | repository: apm-integration-testing 23 | token: '{{ requiredEnv "GITHUB_TOKEN" }}' 24 | username: '{{ requiredEnv "GIT_USER" }}' 25 | branch: main 26 | 27 | sources: 28 | latestRelease: 29 | name: Get Latest Elastic Release 30 | kind: githubrelease 31 | 
transformers: 32 | - trimprefix: v 33 | spec: 34 | owner: elastic 35 | repository: elasticsearch 36 | token: '{{ requiredEnv "GITHUB_TOKEN" }}' 37 | username: '{{ requiredEnv "GIT_USER" }}' 38 | versionfilter: 39 | kind: regex 40 | pattern: ^v8\.(\d+)\.(\d+)$ 41 | 42 | latestVersion: 43 | name: Get latest snapshot build for main 44 | kind: json 45 | spec: 46 | file: https://storage.googleapis.com/artifacts-api/snapshots/main.json 47 | key: .version 48 | 49 | major-minor-patch: 50 | name: Get major-minor-patch version 51 | kind: shell 52 | dependson: 53 | - latestVersion 54 | transformers: 55 | - findsubmatch: 56 | pattern: '^(\d+.\d+.\d+)-.+$' 57 | captureindex: 1 58 | spec: 59 | command: echo {{ source "latestVersion" }} 60 | 61 | conditions: 62 | dockerTag: 63 | name: Is docker image elasticsearch:{{ source "latestRelease" }} published 64 | kind: dockerimage 65 | spec: 66 | image: elasticsearch 67 | tag: '{{ source "latestRelease" }}' 68 | sourceid: latestRelease 69 | 70 | targets: 71 | update-common-sh: 72 | name: 'Update elastic stack version to {{ source "latestRelease" }}' 73 | sourceid: latestRelease 74 | scmid: default 75 | kind: file 76 | spec: 77 | file: .ci/scripts/common.sh 78 | content: '"{{ source `latestRelease` }}"' 79 | matchpattern: '"[0-9]+\.[0-9]+\.[0-9]+"' 80 | 81 | update-release-cli-py: 82 | name: 'Update elastic stack version to {{ source "latestRelease" }} (release)' 83 | sourceid: latestRelease 84 | scmid: default 85 | kind: shell 86 | spec: 87 | command: bash .ci/bump-stack-release-version.sh 88 | 89 | update-bc-cli-py: 90 | name: 'Update elastic stack version to {{ source "major-minor-patch" }} (bc)' 91 | sourceid: major-minor-patch 92 | scmid: default 93 | kind: shell 94 | spec: 95 | command: bash .ci/bump-stack-release-version.sh 96 | -------------------------------------------------------------------------------- /.ci/scripts/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # for details about how it works see https://github.com/elastic/apm-integration-testing#continuous-integration 3 | 4 | function stopEnv() { 5 | make stop-env 6 | } 7 | 8 | function runTests() { 9 | targets="" 10 | if [ -z "${REUSE_CONTAINERS}" ]; then 11 | trap "stopEnv" EXIT 12 | targets="destroy-env" 13 | fi 14 | targets="${targets} $*" 15 | export VENV=${VENV:-${TMPDIR:-/tmp/}venv-$$} 16 | # shellcheck disable=SC2086 17 | make ${targets} 18 | } 19 | 20 | function prepareAndRunAll() { 21 | ## This is for the CI 22 | if [ -d /var/lib/jenkins/.m2/repository ] ; then 23 | echo "m2 cache folder has been found in the CI worker" 24 | cp -rf /var/lib/jenkins/.m2/repository docker/java/spring/.m2 25 | BUILD_OPTS="${BUILD_OPTS} --java-m2-cache" 26 | else 27 | echo "m2 cache folder has NOT been found in the CI worker" 28 | fi 29 | 30 | # export the variables to force them to be defined in the Docker container 31 | export ELASTIC_APM_SECRET_TOKEN=${ELASTIC_APM_SECRET_TOKEN:-"SuPeRsEcReT"} 32 | export APM_SERVER_URL=${APM_SERVER_URL:-"https://apm-server:8200"} 33 | export PYTHONHTTPSVERIFY=0 34 | DEFAULT_COMPOSE_ARGS="${ELASTIC_STACK_VERSION} ${BUILD_OPTS}\ 35 | --no-apm-server-self-instrument \ 36 | --apm-server-enable-tls \ 37 | --no-verify-server-cert \ 38 | --apm-server-secret-token=${ELASTIC_APM_SECRET_TOKEN} \ 39 | --apm-server-url=${APM_SERVER_URL} \ 40 | --apm-log-level=debug" 41 | 42 | export COMPOSE_ARGS=${COMPOSE_ARGS:-${DEFAULT_COMPOSE_ARGS}} 43 | runTests "$@" 44 | } 45 | 46 | function prepareAndRunGoals() {
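  ## Run the given goals with a minimal set of compose arguments;
  ## an already-exported COMPOSE_ARGS takes precedence over these defaults.
47 |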
DEFAULT_COMPOSE_ARGS="${ELASTIC_STACK_VERSION} \ 48 | --no-apm-server-self-instrument" 49 | export COMPOSE_ARGS=${COMPOSE_ARGS:-${DEFAULT_COMPOSE_ARGS}} 50 | runTests "$@" 51 | } 52 | 53 | if [ -n "${APM_SERVER_BRANCH}" ]; then 54 | APM_SERVER_BRANCH_VERSION=${APM_SERVER_BRANCH%;*} 55 | APM_SERVER_BRANCH_TYPE=${APM_SERVER_BRANCH//$APM_SERVER_BRANCH_VERSION/} 56 | APM_SERVER_BRANCH_TYPE=${APM_SERVER_BRANCH_TYPE//;/} 57 | if [ "${APM_SERVER_BRANCH_TYPE}" != "--release" ]; then 58 | BUILD_OPTS="${BUILD_OPTS} --apm-server-build https://github.com/elastic/apm-server.git@${APM_SERVER_BRANCH_VERSION}" 59 | else 60 | ELASTIC_STACK_VERSION="${APM_SERVER_BRANCH_VERSION} --release --apm-server-managed --with-elastic-agent" 61 | fi 62 | fi 63 | 64 | if [ -z "${DISABLE_BUILD_PARALLEL}" ] || [ "${DISABLE_BUILD_PARALLEL}" = "false" ]; then 65 | BUILD_OPTS="${BUILD_OPTS} --build-parallel" 66 | fi 67 | 68 | ELASTIC_STACK_VERSION=${ELASTIC_STACK_VERSION:-"8.11.3"} 69 | 70 | echo "ELASTIC_STACK_VERSION=${ELASTIC_STACK_VERSION}" 71 | echo "APM_SERVER_BRANCH_VERSION=${APM_SERVER_BRANCH_VERSION}" 72 | echo "APM_SERVER_BRANCH_TYPE=${APM_SERVER_BRANCH_TYPE}" 73 | echo "BUILD_OPTS=${BUILD_OPTS}" 74 | 75 | # Install virtualenv 76 | python3 -m pip install --user virtualenv 77 | -------------------------------------------------------------------------------- /docker/metricbeat/metricbeat.6.x-compat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | setup.template.settings: 3 | index.number_of_shards: 1 4 | index.codec: best_compression 5 | index.number_of_replicas: 0 6 | 7 | setup.kibana: 8 | host: "${KIBANA_HOST:kibana:5601}" 9 | 10 | output.elasticsearch: 11 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 12 | username: '${ELASTICSEARCH_USERNAME:}' 13 | password: '${ELASTICSEARCH_PASSWORD:}' 14 | 15 | logging.json: true 16 | logging.metrics.enabled: false 17 | 18 | xpack.monitoring.enabled: true 19 | ################################################################################################### 20 | ## modules 21 | ################################################################################################### 22 | metricbeat.config.modules: 23 | path: /dev/null 24 | reload.enabled: false 25 | 26 | metricbeat.modules: 27 | - module: golang 28 | metricsets: ["expvar", "heap"] 29 | period: 10s 30 | hosts: ["${APM_SERVER_PPROF_HOST:apm-server:6060}"] 31 | heap.path: "/debug/vars" 32 | expvar: 33 | namespace: "apm-server" 34 | path: "/debug/vars" 35 | - module: docker 36 | metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] 37 | hosts: ["unix:///var/run/docker.sock"] 38 | period: 10s 39 | 40 | metricbeat.autodiscover: 41 | providers: 42 | - type: docker 43 | templates: 44 | - condition: 45 | contains: 46 | docker.container.image: "redis" 47 | config: 48 | - module: redis 49 | metricsets: ["info", "keyspace"] 50 | hosts: "${data.host}:6379" 51 | - condition: 52 | contains: 53 | docker.container.image: "postgres" 54 | config: 55 | - module: postgresql 56 | metricsets: ["database", "bgwriter", "activity"] 57 | hosts: ["postgres://${data.host}:5432?sslmode=disable"] 58 | password: verysecure 59 | username: postgres 60 | - condition: 61 | contains: 62 | docker.container.image: "elasticsearch" 63 | config: 64 | - module: elasticsearch 65 | metricsets: ["node", "node_stats"] 66 | period: 10s 67 | hosts: "${data.host}:9200" 68 | - condition: 69 | contains: 70 | docker.container.image: "kafka" 71 | config: 72 | - module: kafka 73 | 
metricsets: ["consumergroup", "partition"] 74 | period: 10s 75 | hosts: "${data.host}:9092" 76 | - condition: 77 | contains: 78 | docker.container.image: "logstash" 79 | config: 80 | - module: logstash 81 | metricsets: ["node", "node_stats"] 82 | period: 10s 83 | hosts: "${data.host}:9600" 84 | -------------------------------------------------------------------------------- /docker/apm-server/recorder/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "compress/gzip" 5 | "compress/zlib" 6 | "encoding/json" 7 | "errors" 8 | "flag" 9 | "fmt" 10 | "io" 11 | "io/ioutil" 12 | "log" 13 | "net/http" 14 | "os" 15 | "sync" 16 | "time" 17 | ) 18 | 19 | // syncWriter helps sync batches of writes 20 | type syncWriter struct { 21 | sync.Mutex 22 | io.Writer 23 | } 24 | 25 | func newSyncWriter(w io.Writer) syncWriter { 26 | return syncWriter{Writer: w} 27 | } 28 | 29 | func bodyReader(req *http.Request) (io.ReadCloser, error) { 30 | if req.Body == nil { 31 | return nil, errors.New("no content") 32 | } 33 | 34 | switch req.Header.Get("Content-Encoding") { 35 | case "deflate": 36 | return zlib.NewReader(req.Body) 37 | case "gzip": 38 | return gzip.NewReader(req.Body) 39 | } 40 | return req.Body, nil 41 | } 42 | 43 | func payloadRecorder(record syncWriter) http.Handler { 44 | jsonEncoder := json.NewEncoder(record) 45 | return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { 46 | reply := func(a ...interface{}) (n int, err error) { 47 | return fmt.Fprintln(w, a...) 48 | } 49 | // ignore healthchecks 50 | if req.URL.Path == "/" { 51 | reply("ok") 52 | return 53 | } 54 | 55 | event := map[string]interface{}{ 56 | "time": time.Now().UTC(), 57 | "method": req.Method, 58 | "url": req.URL.Path, 59 | } 60 | defer func() { 61 | record.Lock() 62 | if err := jsonEncoder.Encode(event); err != nil { 63 | log.Println(err) 64 | } 65 | record.Unlock() 66 | }() 67 | var body io.ReadCloser 68 | if br, err := bodyReader(req); err != nil { 69 | event["error"] = err 70 | reply(err.Error()) 71 | return 72 | } else { 73 | body = br 74 | } 75 | // hope it's not too big 76 | b, err := ioutil.ReadAll(body) 77 | if err != nil { 78 | event["error"] = err 79 | reply(err.Error()) 80 | return 81 | } 82 | if len(b) > 0 { 83 | event["body"] = string(b) 84 | } 85 | reply("ok") 86 | }) 87 | } 88 | 89 | func main() { 90 | flag.String("e", "", "apm-server compatility option") 91 | flag.String("E", "", "apm-server compatility option") 92 | flag.String("httpprof", "", "apm-server compatility option") 93 | 94 | addr := flag.String("addr", ":8200", "HTTP listen address") 95 | out := flag.String("out", "events.json", "path to record") 96 | console := flag.Bool("console", false, "also dump events to stdout") 97 | flag.Parse() 98 | 99 | outfile, err := os.Create(*out) 100 | if err != nil { 101 | log.Fatal(err) 102 | } 103 | defer outfile.Close() 104 | var record io.Writer = outfile 105 | if *console { 106 | record = io.MultiWriter(record, os.Stdout) 107 | } 108 | sw := newSyncWriter(record) 109 | 110 | s := &http.Server{ 111 | Addr: *addr, 112 | Handler: payloadRecorder(sw), 113 | ReadTimeout: 1 * time.Minute, 114 | WriteTimeout: 1 * time.Minute, 115 | } 116 | log.Fatal(s.ListenAndServe()) 117 | } 118 | -------------------------------------------------------------------------------- /docker/apm-server/pipelines/opbeans-servicemap.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "apm", 4 | "body": { 
5 | "description" : "Default enrichment for APM events", 6 | "processors" : [ 7 | { 8 | "pipeline": { 9 | "name": "apm_user_agent" 10 | } 11 | }, 12 | { 13 | "pipeline": { 14 | "name": "apm_user_geo" 15 | } 16 | }, 17 | { 18 | "pipeline": { 19 | "name": "opbeans_servicemap" 20 | } 21 | } 22 | ] 23 | } 24 | }, 25 | { 26 | "id": "apm_user_agent", 27 | "body": { 28 | "description" : "Add user agent information for APM events", 29 | "processors" : [ 30 | { 31 | "user_agent" : { 32 | "field": "user_agent.original", 33 | "target_field": "user_agent", 34 | "ignore_missing": true, 35 | "ignore_failure": true 36 | } 37 | } 38 | ] 39 | } 40 | }, 41 | { 42 | "id": "apm_user_geo", 43 | "body": { 44 | "description" : "Add user geo information for APM events", 45 | "processors" : [ 46 | { 47 | "geoip" : { 48 | "database_file": "GeoLite2-City.mmdb", 49 | "field": "client.ip", 50 | "target_field": "client.geo", 51 | "ignore_missing": true, 52 | "on_failure": [ 53 | { 54 | "remove": { 55 | "field": "client.ip", 56 | "ignore_missing": true, 57 | "ignore_failure": true 58 | } 59 | } 60 | ] 61 | } 62 | } 63 | ] 64 | } 65 | }, 66 | { 67 | "id": "opbeans_servicemap", 68 | "body": { 69 | "description": "sets destination on ext spans based on their name", 70 | "processors": [ 71 | { 72 | "set": { 73 | "if": "ctx.span != null && ctx.span.type == 'ext'", 74 | "field": "span.type", 75 | "value": "external" 76 | } 77 | }, 78 | { 79 | "script": "\n if(ctx['span'] != null) {\n if (ctx['span']['type'] == 'external') {\n def spanName = ctx['span']['name'];\n if (spanName.indexOf('/') > -1) {\n spanName = spanName.substring(0, spanName.indexOf('/'));\n } \n if (spanName.indexOf(' ') > -1) {\n spanName = spanName.substring(spanName.indexOf(' ')+1, spanName.length());\n } \n ctx['destination.address'] = spanName;\n } \n if (ctx['span']['type'] == 'resource') {\n def spanName = ctx['span']['name'];\n if (spanName.indexOf('://') > -1) {\n spanName = spanName.substring(spanName.indexOf('://')+3, spanName.length());\n } \n if (spanName.indexOf('/') > -1) {\n spanName = spanName.substring(0, spanName.indexOf('/'));\n } \n ctx['destination.address'] = spanName;\n } \n if (ctx['span']['type'] == 'db') {\n def dest = ctx['span']['subtype'];\n ctx['destination.address'] = dest;\n } \n if (ctx['span']['type'] == 'cache') {\n def dest = ctx['span']['subtype'];\n ctx['destination.address'] = dest;\n } \n }" 80 | } 81 | ] 82 | } 83 | } 84 | ] 85 | -------------------------------------------------------------------------------- /scripts/tls/apm-server/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDXbo97cngSB49A 3 | X+WijESJoUW5AGkNqek9FrBypYnHdT+Pw+Ze/N/r1bG8Ka0akalwFLFFnFEmCVwV 4 | 9arQtjLSqouMWFEwMkFeXYKzKAlfr7WWN9UiuJKJbpndb1TIfqaPiMLni7yzBStb 5 | UBiF5SW5Ug2FWriLea6e/SCIVjmP2KXEMvUWscpJY02E2z4y1pt0/dm6MNZy4eRi 6 | 904SRAJCzvoZCb/e9evmEVmLT3KI4hs6S4xYBedDKD+C4pZvVK9rQZ6qb29j8BNZ 7 | JJcvAwwbXIPhzOLYBrG3VqNj4KfNgEVeAy/iZTSfklUGQJk23NyzMMZ1CbbijHod 8 | q8G/RzsrjjV3xdT4hM8g4QJWm8kuraABstZpTAKKdQpTtcFeTgMsuO03g+s4jcjC 9 | ge/1x3VuK29eiuo+hFduwYNmSula35MJt2kW6c7X1UTF/hzFKZLR/ldjsCaD+5e5 10 | c8P6vMjHaKRue80SWtXcfDVj/1CovBI/vOw2MliTg4jRw+zzO/b33eO5TXFscPqe 11 | 57RKZ5+hHltq+0pZn4bXSMS9g4k4igKvSrcovXBZxpw30gyUk09RtbyA3Rt91bdm 12 | OfsKswhm2MjLADmjHV2Unp4l1xUeBl+4s/gBqCqIwW5qKPVaj5r27nTVPsrIWuCC 13 | b03znMyWYLLcQlxvjF2xFxUoMrCJhQIDAQABAoICAQCfClIGsoUN2mLZBXLDw4W9 14 | 
jT+pyjHEEpHLtXphyO+kPlzER71Elq7AriveW24d1TcfNUeBulr2F6bR12FZX4i5 15 | mYoX/AND73Xusl4Q4Re6ej82PNWuIlCcAPi6Trxqn4VbJX2t7q1KBCDz8neIMZjd 16 | 7UNqFYV0Akr1uK1RuUYZebk21N+29139O8A4upp6cZCml9kq6W8HtNgkb6pFNcvt 17 | gluELHxnn2mdmWVfwTEu+K1dJfTf7svB+m6Ys6qXWg9+wRzfehDj2JKQFsE9xaQk 18 | dvItulIlZRvB28YXr/xxa6bKNtQc8NYej6sRSJNTu017RCDeumM3cLmeOfR4v59f 19 | tkMWnFcA3ykmsaK2FiQyX+MoWvs5vdT7/yNIfz3a4MErcWg8z3FDbffKfbhgsb+2 20 | z4Ub6fIRKZykW2ajN7t0378bMmJ3rPT66QF40aNNeWasF3EHcwekDPpsHIBJoY4G 21 | 9aG6uTUmRkC+NGeP9HroxkvDo2NbXn8XGOEJS64rwsME3CsUi1A5ZY0XLTxYptH6 22 | X2TfC5oTmnsYB/wWqo26bTJc0bwDOueQWYap0aVtv3f/0tzueKepCbxdeG4ikA0U 23 | 2t3F+OUmoCZ5D0p+6zLvrTUPhPCFEynp+vGUvmbwozYi0NWzFyFqlvqRG1KLIVLG 24 | ZRyTMYuZ/cWkv1SJYbEcaQKCAQEA/9HaJg2YACv7rx6/FesE/81u16OYTaahHngW 25 | 4M+5rT0+fNKYH/fYkwavQ/Gr6FSTls7F+8K9DVwoGLZRQ3t6epCXqGqX0uaY+iSH 26 | O8eezXVnHzUaVE4KlwJY9xZ+K1iIf5zUb5hpaQI0jKS/igcxFAsutWiyenrz8eQp 27 | MAycZmzkQMLbUsa1t6y0VaEaC4YMHyQ9ag2eMfqbG27plFQbYxllHXowGMFXPheY 28 | xACwo5V5tJUgRP+HlrI4rf0vadMgVIKxVSUiqIzGREIkYrTAshFjkpHR5/R8s/kH 29 | Xm8q2gdoJltBFJzA2B8MHXVi7mYDBlUmBoRKhzkl/TSray9j7wKCAQEA15VsNQZu 30 | cZluboz/R4EDbEm1po2UBcNNiu/fgJ8BDUkLzJESIITY41fgvBbTun1fiuGeE+El 31 | 0o1w4hQhIiV1KAB44w69fJR0VELfMZiIcd8kd0sDgPPVrd1MzzKPZ9yg4mbEkCCO 32 | V/EoTi8Ut27sMcl8059qm1qq7I5pzHwSziNa087m+5VdfmvJZJVipudngZ3QmRgU 33 | KKcBhgFFSkncYezoq2XQfRcqkk0sORxDvsMmRInyHZh0l9zv46ihgTvErlCHtizV 34 | V4HNO4OPz7FxUZ04iWSGZs4snu1cW2j+lbKuOkADveBYVmCcdZ3R0SH+A5skL0zG 35 | tm6z0TNP/kFlywKCAQEA+lTdFu2od0qTADujG4yemL7rn2J8EEhlU86J/LXo6UiM 36 | FFNz/5xltwIMkf00jqXswt9WR9W5cBBlQEFwZgu3v6YscebU6NE0k1sZZnshv8YK 37 | AjTRrfusSzdF3YyKLFp3QAE0tHs9cz9wMsyojiYZdZa3v1dTh503h9YQI+/DQEuA 38 | VIsZWfgPLEx5L231cZ9bz0GEQ3pN+nRUQdUYB0kCf8gC9YRy+lZ/y8gFeo9+SqVj 39 | sj1XlY1DnkiKRGAEfJbYBTra0woCz1LqVTMwLdLY2adAe9XrxQKu4OJovpUkJrSm 40 | yxnzJnt6DkLbdRxAki8K+LBsBGaCE67tqMhYkguOywKCAQAslEl77YiJFSEw2xcu 41 | wg7jJZrahgxF5Mz0HgYporek96Xo91a4QsBWwqVGP7IoriRDo8P8eGJJ19Wv6lmv 42 | pe9EBlT5HuMwD8K+adWde907Ltlrkad30vQsr8ZiUiI1Z/oc1wNuikzlAolDIZk3 43 | FUjiQrf9SsnQtj8CC7D1B/MbjVQK2I4LGCftLHzIv9tWiCNvOiMYhVIl1eMKwtiB 44 | NCTOWx8B0lv6gf/boPm0FZQsrk4LfjsCw7PYc2dnvEcpYiKZqS1nDn5PShgWZm4m 45 | lJrKNairQI5KU/gGJS8j9+ItMnW0tegQK4QY2IGCENCCXnUYacxhu46byuiEKggw 46 | m3VhAoIBAQCQa90StsZHqZ+J83do3kpvD+O5nURPnckznC2WJgraW49k5vltnJTT 47 | zkFTqHMLfmYwAz1o15sPCqlkMD+fEUzg6Hpzxm7dOUppkf5KFbD7AnsYU9U8LamJ 48 | HaET7Dq5TpjG7uoaHZZjs7cCHcWu2E8nIezyAtZ+rbTg/qW7bYMAlJTkerznGuDU 49 | v0hNzCr/81o5rbX0UhetcmKVOprUSWzfrw5ElLhAtzM7zivbZSnsOny8pC33FtQ5 50 | iQbVcNGUjfFCM95ZipxxN9z0FwxpJ1paCPGYA86u2olWl/VnVPqEj7WYzO8H5W2q 51 | aXpWH6HVf6B10pQrWWwUAAHyqYS5bZkQ 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /scripts/modules/proxy.py: -------------------------------------------------------------------------------- 1 | import json 2 | from .service import Service 3 | from .helpers import wget_healthcheck 4 | from .opbeans import OpbeansService, OpbeansRum 5 | 6 | 7 | class Dyno(Service): 8 | """ 9 | Dyno is the management interface for the 10 | proxy services 11 | """ 12 | SERVICE_PORT = 9999 13 | opbeans_side_car = False 14 | 15 | def __init__(self, **options): 16 | super(Dyno, self).__init__(**options) 17 | 18 | def _content(self): 19 | return dict( 20 | build=dict( 21 | context="docker/dyno", 22 | dockerfile="Dockerfile", 23 | args=[] 24 | ), 25 | environment={"TOXI_HOST": "toxi", "TOXI_PORT": "8474"}, 26 | container_name="dyno", 27 | image=None, 28 | labels=None, 29 | logging=None, 30 | 
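            # NOTE: image, labels and logging are deliberately None here, presumably so
            # the compose renderer omits or defaults those fields. The UI itself listens
            # on container port 8000 and is published on host port 9000 (see ports below).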
healthcheck=wget_healthcheck(8000, path="/"), 31 | ports=["9000:8000"], 32 | volumes=["/var/run/docker.sock:/var/run/docker.sock", "./docker/dyno:/dyno"], 33 | ) 34 | 35 | 36 | class Toxi(Service): 37 | SERVICE_PORT = 8474 38 | opbeans_side_car = False 39 | 40 | def __init__(self, **options): 41 | self.service_offset = 10000 42 | super(Toxi, self).__init__(**options) 43 | self.generated_ports = [self.publish_port(self.port, self.SERVICE_PORT, expose=True)] 44 | 45 | def _content(self): 46 | return dict( 47 | healthcheck=wget_healthcheck(8474, path="/proxies"), 48 | image="shopify/toxiproxy", 49 | labels=None, 50 | ports=self.generated_ports, 51 | volumes=["./docker/toxi/toxi.cfg:/toxi/toxi.cfg"], 52 | command=["-host=0.0.0.0", "-config=/toxi/toxi.cfg"], 53 | restart='on-failure', 54 | ) 55 | 56 | def gen_ports(self, services): 57 | """ 58 | Take the services we know about and look for user-facing 59 | instances and be sure to expose them from our container 60 | """ 61 | for s in sorted(services, key=lambda x: x.name()): 62 | if isinstance(s, OpbeansService) or s is OpbeansRum: # is opbeans service 63 | self.generated_ports.append("{}:{}".format(s.SERVICE_PORT, s.SERVICE_PORT)) 64 | 65 | def gen_config(self, services): 66 | config = [] 67 | opbeans_sidecars = ['postgres', 'redis', 'opbeans-load-generator'] 68 | opbeans_2nds = ('opbeans-go01', 'opbeans-java01', 'opbeans-python01', 'opbeans-ruby01', 'opbeans-dotnet01', 69 | 'opbeans-node01') 70 | for s in sorted(services, key=lambda x: x.name()): 71 | # TODO refactor this for DRY 72 | is_opbeans_service = isinstance(s, OpbeansService) or s is OpbeansRum 73 | is_opbeans_sidecar = s.name() in opbeans_sidecars 74 | is_opbeans_2nd = s.name() in opbeans_2nds 75 | 76 | if hasattr(s, "SERVICE_PORT") and not s.name().startswith('toxi') and \ 77 | (is_opbeans_service or is_opbeans_sidecar or is_opbeans_2nd): 78 | 79 | sp = int(s.SERVICE_PORT) 80 | if is_opbeans_service: 81 | # We use APPLICATION_PORT because we want the container port and not the exposed port 82 | upstream_port = s.APPLICATION_PORT 83 | else: 84 | upstream_port = sp 85 | 86 | service_def = { 87 | "name": s.name(), 88 | "listen": "[::]:{}".format(sp), 89 | "upstream": "{}:{}".format(s.name(), upstream_port), 90 | "enabled": True 91 | } 92 | config.append(service_def) 93 | ret = json.dumps(config, sort_keys=True, indent=4) 94 | return ret 95 | -------------------------------------------------------------------------------- /scripts/kibana/validate-ts-interfaces-against-apm-server-sample-docs/scripts/download-sample-docs.ts: -------------------------------------------------------------------------------- 1 | import prettier from 'prettier'; 2 | import axios from 'axios'; 3 | import { promisify } from 'util'; 4 | import fs from 'fs'; 5 | const writeFile = promisify(fs.writeFile); 6 | import prettierRc from '../.prettierrc.json'; 7 | 8 | const [owner = 'elastic', branch = 'main'] = process.argv.slice(2); 9 | console.log(`Downloading sample docs: ${owner}:${branch}`); 10 | 11 | interface DocType { 12 | name: string; 13 | url: string; 14 | getFileContent: (data: any) => string; 15 | } 16 | 17 | const docTypes: DocType[] = [ 18 | { 19 | name: 'spans', 20 | url: `https://raw.githubusercontent.com/elastic/apm-server/${branch}/beater/test_approved_es_documents/TestPublishIntegrationSpans.approved.json`, 21 | getFileContent: data => { 22 | return `import { SpanRaw } from '../apm-ui-interfaces/raw/span_raw'; 23 | import { AllowUnknownProperties } from '../../scripts/helpers'; 24 | export const 
sampleDoc:AllowUnknownProperties[] = ${JSON.stringify( 25 | data.events 26 | )}`; 27 | } 28 | }, 29 | { 30 | name: 'transactions', 31 | url: `https://raw.githubusercontent.com/elastic/apm-server/${branch}/beater/test_approved_es_documents/TestPublishIntegrationTransactions.approved.json`, 32 | getFileContent: data => { 33 | return `import { TransactionRaw } from '../apm-ui-interfaces/raw/transaction_raw'; 34 | import { AllowUnknownProperties } from '../../scripts/helpers'; 35 | export const sampleDoc:AllowUnknownProperties[] = ${JSON.stringify( 36 | data.events 37 | )}`; 38 | } 39 | }, 40 | { 41 | name: 'errors', 42 | url: `https://raw.githubusercontent.com/elastic/apm-server/${branch}/beater/test_approved_es_documents/TestPublishIntegrationErrors.approved.json`, 43 | getFileContent: data => { 44 | return `import { ErrorRaw } from '../apm-ui-interfaces/raw/error_raw'; 45 | import { AllowUnknownProperties } from '../../scripts/helpers'; 46 | export const sampleDoc:AllowUnknownProperties[] = ${JSON.stringify( 47 | data.events 48 | )}`; 49 | } 50 | }, 51 | { 52 | name: 'metrics', 53 | url: `https://raw.githubusercontent.com/elastic/apm-server/${branch}/beater/test_approved_es_documents/TestPublishIntegrationMetricsets.approved.json`, 54 | getFileContent: data => { 55 | return `import { MetricRaw } from '../apm-ui-interfaces/raw/metric_raw'; 56 | import { AllowUnknownProperties } from '../../scripts/helpers'; 57 | export const sampleDoc:AllowUnknownProperties[] = ${JSON.stringify( 58 | data.events 59 | )}`; 60 | } 61 | }, 62 | { 63 | name: 'minimal', 64 | url: `https://raw.githubusercontent.com/elastic/apm-server/${branch}/beater/test_approved_es_documents/TestPublishIntegrationMinimalEvents.approved.json`, 65 | getFileContent: data => { 66 | return `import { SpanRaw } from '../apm-ui-interfaces/raw/span_raw'; 67 | import { TransactionRaw } from '../apm-ui-interfaces/raw/transaction_raw'; 68 | import { ErrorRaw } from '../apm-ui-interfaces/raw/error_raw'; 69 | import { MetricRaw } from '../apm-ui-interfaces/raw/metric_raw'; 70 | import { AllowUnknownProperties } from '../../scripts/helpers'; 71 | export const sampleDoc: AllowUnknownProperties< 72 | SpanRaw | TransactionRaw | ErrorRaw | MetricRaw 73 | >[] = ${JSON.stringify(data.events)}`; 74 | } 75 | } 76 | ]; 77 | 78 | const promises = docTypes.map(async docType => { 79 | const fileName = `./tmp/apm-server-docs/${docType.name}.ts`; 80 | const { data } = await axios.get(docType.url); 81 | 82 | const content = docType.getFileContent(data); 83 | 84 | const formattedContent = prettier.format(content, { 85 | ...prettierRc, 86 | parser: 'babel' 87 | }); 88 | 89 | await writeFile(fileName, formattedContent); 90 | }); 91 | 92 | Promise.all(promises); 93 | -------------------------------------------------------------------------------- /docker/opbeans/dotnet/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | 4 | git clone https://github.com/"${OPBEANS_DOTNET_REPO}".git /src/opbeans-dotnet 5 | cd /src/opbeans-dotnet || exit 6 | git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' 7 | git checkout "${OPBEANS_DOTNET_BRANCH}" 8 | 9 | CSPROJ="opbeans-dotnet.csproj" 10 | 11 | PACKAGE=Elastic.Apm.NetCoreAll 12 | SRC_AGENT=/src/dotnet-agent 13 | CSPROJ_VERSION="${SRC_AGENT}/src/Elastic.Apm.NetCoreAll/${PACKAGE}.csproj" 14 | BUILD_PROPS="${SRC_AGENT}/src/Directory.Build.props" 15 | 16 | if [ -z "${DOTNET_AGENT_VERSION}" ] ; then 17 | git clone 
https://github.com/"${DOTNET_AGENT_REPO}".git $SRC_AGENT 18 | cd $SRC_AGENT || exit 19 | git fetch -q origin '+refs/pull/*:refs/remotes/origin/pr/*' 20 | git checkout "${DOTNET_AGENT_BRANCH}" 21 | 22 | ### Otherwise: /usr/share/dotnet/sdk/2.2.203/NuGet.targets(119,5): error : The local source '/src/local-packages' doesn't exist. [/src/dotnet-agent/ElasticApmAgent.sln] 23 | mkdir /src/local-packages 24 | 25 | ### Errorlevels might happen when fetching PRs with some errors like: error: cannot lock ref 'refs/remotes/origin/pr/82/head': 'refs/remotes/origin/pr/82' exists; cannot create 26 | ### Let's fail if something bad happens when building the agent from the source code 27 | set -e 28 | # Remove Full Framework projects with backward compatibility 29 | if [ -e .ci/linux/remove-projects.sh ] ; then 30 | .ci/linux/remove-projects.sh 31 | else 32 | ## See https://github.com/elastic/apm-agent-dotnet/blob/480be30a699ba276ebd2a7055083e92f9f1e2207/.ci/linux/test.sh#L9-L11 33 | dotnet sln remove sample/AspNetFullFrameworkSampleApp/AspNetFullFrameworkSampleApp.csproj 34 | dotnet sln remove src/Elastic.Apm.AspNetFullFramework/Elastic.Apm.AspNetFullFramework.csproj 35 | dotnet sln remove test/Elastic.Apm.AspNetFullFramework.Tests/Elastic.Apm.AspNetFullFramework.Tests.csproj 36 | fi 37 | 38 | dotnet restore 39 | dotnet pack -c Release -o /src/local-packages 40 | 41 | cd /src/opbeans-dotnet/opbeans-dotnet || exit 42 | mv /src/NuGet.Config . 43 | # shellcheck disable=SC2016 44 | sed -ibck 's##$(RestoreSources);/src/local-packages;https://api.nuget.org/v3/index.json#' ${CSPROJ} 45 | 46 | ### Search the version of the agent using VersionPrefix otherwise PackageVersion (to keep backward compatibility) 47 | SEARCH="VersionPrefix" 48 | if ! grep ${SEARCH} ${BUILD_PROPS} ; then 49 | SEARCH="PackageVersion" 50 | fi 51 | 52 | DOTNET_AGENT_VERSION=$(grep "${SEARCH}" ${BUILD_PROPS} | sed 's#<.*>\(.*\)<.*>#\1#' | tr -d " ") 53 | if [ -z "${DOTNET_AGENT_VERSION}" ] ; then 54 | echo 'INFO: search version in the csproj. (only for agent version < 1.3)' 55 | DOTNET_AGENT_VERSION=$(grep 'PackageVersion' ${CSPROJ_VERSION} | sed 's#<.*>\(.*\)<.*>#\1#' | tr -d " ") 56 | if [ -z "${DOTNET_AGENT_VERSION}" ] ; then 57 | ## From 1.22 onwards (see https://github.com/elastic/apm-agent-dotnet/pull/2063) 58 | pushd $SRC_AGENT 59 | dotnet tool restore 60 | DOTNET_AGENT_VERSION=$(dotnet minver -t=v -p=canary.0 -v=e) 61 | if [ -z "${DOTNET_AGENT_VERSION}" ] ; then 62 | echo 'ERROR: DOTNET_AGENT_VERSION could not be calculated.' && exit 1 63 | fi 64 | popd 65 | fi 66 | fi 67 | 68 | dotnet add package ${PACKAGE} -v "${DOTNET_AGENT_VERSION}" 69 | else 70 | ### Otherwise: The default NuGet.Config will fail as it's required 71 | mkdir /src/local-packages 72 | fi 73 | 74 | cd /src/opbeans-dotnet/opbeans-dotnet || exit 75 | # This is the way to manipulate the csproj with the version of the dotnet agent to be used 76 | sed -ibck "s#\( 2 | 3 | 4 | 5 | 6 | Dyno 7 | 8 | 14 | 15 | 16 | 17 | 18 | 19 | 48 | 49 | 50 |
51 |

APM Dyno

52 |
53 | Dyno can load test your Opbeans applications.

54 | Start load tests for services on the left and control
55 | the performance of services using their respective sliders.

56 |
57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 |
Load generator
W: Workers
Er: Error rate
68 |
69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 |
Network conditions
L: LatencyJ: Jitter
B: BandwidthSC: Slow close
Sas: Slicer [Avg size]Ssv: Slicer [Delay]
86 |
87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 |
Operating system  
CPU: Processor quota
Mem: Container memory
98 |
99 |
100 |
101 | 102 | 103 | 104 | 105 |
106 |
107 |
108 | 109 |

110 | 113 |
114 |
115 |
116 |
117 |
118 | 119 |
120 |
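The legend abbreviations above correspond to slider bounds declared in docker/dyno/app/range.yml, which follows. The sketch below illustrates the arithmetic that file's comments describe (a 1-100 slider position scaled across a low/high bound pair); `slider_to_raw` is a hypothetical name used only for illustration, not a function in this repo, and the app's real mapping lives in docker/dyno/app/api/docker.py:

```python
# Minimal sketch of the slider-to-raw mapping described in range.yml's comments.
# `slider_to_raw` is a hypothetical helper for illustration only.

def slider_to_raw(position: int, low: float, high: float) -> float:
    """Scale a 1-100 slider position across the (low, high) bounds."""
    if not 1 <= position <= 100:
        raise ValueError("slider position must be between 1 and 100")
    # subtract the lower bound from the upper bound, then take 1/100 of the
    # slider value, exactly as the range.yml comment walks through
    return (high - low) * position / 100


# range.yml's own worked example: memory bounds of 100 MB and 1000 MB with the
# slider at its midpoint (50) give (1000 - 100) * 50 / 100 = 450
assert slider_to_raw(50, 100, 1000) == 450
```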
121 | 122 | 123 | -------------------------------------------------------------------------------- /docker/dyno/app/range.yml: -------------------------------------------------------------------------------- 1 | # Licensed to Elasticsearch B.V. under one or more contributor 2 | # license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright 4 | # ownership. Elasticsearch B.V. licenses this file to you under 5 | # the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an 13 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | # KIND, either express or implied. See the License for the 15 | # specific language governing permissions and limitations 16 | # This file describes normalization for the sliders 17 | # 18 | # ======================================================== 19 | # Slider range definitions 20 | # ======================================================== 21 | # 22 | # Each slider can slide from values between 1-100, 23 | # where 100 is the highest position which represents 24 | # "maximum" pressure on a service and 0 is the lowest 25 | # slider position which represents no pressure being 26 | # applied to the service. 27 | # 28 | # However, the various toxics which can be applied to 29 | # a service need to have raw values supplied to them 30 | # and the associated units vary by service. For example, 31 | # with network toxics the raw value might be applied in 32 | # milliseconds while for container memory, the value might 33 | # be applied in megabytes. 34 | # 35 | # Therefore, we have this file which lists the lower and upper 36 | # bounds for each in raw units. When a slider is moved, we 37 | # do basic division to determine the value to pass into the 38 | # toxic itself. For example, if the range of the memory slider 39 | # in this file is between 100 MB and 1000MB, and the slider 40 | # is set to its midpoint (50), we subtract the lower bound 41 | # from the upper bound (900) and then multiply by 1/100 of 42 | # the slider value to reach the answer of 450. See the 43 | # implementation code for more details on this in action. 44 | # 45 | # Values are represented as a two-element list. The first element 46 | # is the low-bound, which should provide the *best* performance 47 | # (other than being disabled) and the latter value represents the 48 | # high-bound which should represent the *worst* performance outside 49 | # of simply disabling the service. 50 | 51 | ## Start the Toxi settings 52 | --- 53 | B: 54 | # Bandwidth: Limit a connection to a maximum number of kilobytes per second. 55 | # 1KB/sec -> 100KB/s 56 | - 5 57 | - 1 58 | L: 59 | # Latency: Add a delay to all data going through the proxy. The delay is equal to latency +/- jitter. 60 | # 0ms -> 1000ms 61 | - 1 62 | - 1000 63 | J: 64 | # Jitter: Add a delay to all data going through the proxy. The delay is equal to latency +/- jitter. 65 | # 0ms -> 1000ms 66 | - 1 67 | - 1000 68 | SC: 69 | # Slow close: Delay the TCP socket from closing until delay has elapsed. 70 | - 1 71 | - 1000 72 | T: 73 | # Timeout: Stops all data from getting through, and closes the connection after timeout. 
74 | # If timeout is 0, the connection won't close, and data will be delayed until the toxic is removed. 75 | # 1ms -> 1000ms 76 | - 1000 77 | - 1 78 | 79 | # The following are slicer settings. 80 | # A slicer slices TCP data up into small bits, optionally adding a delay between each sliced "packet". 81 | 82 | Sas: # average_size: size in bytes of an average packet 83 | # 1 byte -> 1000 bytes 84 | - 1 85 | - 1000 86 | # Currently disabled because we need to bound it against Sas 87 | # FIXME: (should be smaller than average_size) 88 | Ssv: # size_variation: variation in bytes of an average packet 89 | - 1 90 | - 1000 91 | Sd: # time in microseconds to delay each packet by 92 | - 1 93 | - 50000 # All the way up to 50ms which will cause some massive destruction! 94 | Ld: # the size in bytes that should be sent before closing the connection 95 | - 1 96 | - 5000000 97 | ## End the Toxi settings 98 | 99 | ## Start the Docker settings 100 | ## For more information see: https://docs.docker.com/config/containers/resource_constraints/ 101 | cpu: 102 | # Impose a CPU CFS quota on the container. 103 | # The number of microseconds per --cpu-period that the container is limited to before throttled. 104 | - 25000 # Not totally sure this is also the default for quota. FIXME: Possible bug! 105 | - 1000 # 1,000 is the lower limit offered by the Docker API 106 | mem: 107 | # The maximum amount of memory the container can use. 108 | # Note: We're going to always assume MB 109 | - 2000 # FIXME What's the default out-of-the-box? 110 | - 5 # That outta do it. 4MB is the Docker-imposed limit. 111 | -------------------------------------------------------------------------------- /docker/dyno/tests/unit/test_docker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Licensed to Elasticsearch B.V. under one or more contributor 4 | # license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright 6 | # ownership. Elasticsearch B.V. licenses this file to you under 7 | # the Apache License, Version 2.0 (the "License"); you may 8 | # not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. 
See the License for the 17 | # specific language governing permissions and limitations 18 | 19 | """ 20 | Tests for the Opbeans Dyno Docker integration 21 | """ 22 | from pytest import mark, raises 23 | from unittest import mock 24 | from flask import url_for 25 | import dyno.app.api.docker as dkr 26 | 27 | CONTAINER_NAME_FUZZ = ['a_foo', 'b__foo', '_c_foo'] 28 | 29 | @mark.parametrize('container_fuzz', CONTAINER_NAME_FUZZ) 30 | @mock.patch('dyno.app.api.docker.container_list', return_value={'containers': CONTAINER_NAME_FUZZ}) 31 | def test_normalize_name_multiple(cl, container_fuzz): 32 | """ 33 | GIVEN multiple containers with names which end in `foo` 34 | WHEN the name ending in `foo` is passed into the _normalize_name function 35 | THEN the function raises an exception 36 | """ 37 | with raises(Exception, match="more than one"): 38 | dkr._normalize_name('foo') 39 | 40 | 41 | @mock.patch('dyno.app.api.docker.container_list', return_value={'containers': CONTAINER_NAME_FUZZ}) 42 | def test_normalize_name_multiple_not_found(cl): 43 | """ 44 | GIVEN no containers which end in `baz` 45 | WHEN a name ending in `baz` is passed into the _normalize_name func 46 | THEN an exception is raised 47 | """ 48 | with raises(Exception, match="not found"): 49 | dkr._normalize_name('baz') 50 | 51 | @mock.patch('dyno.app.api.docker.client') 52 | def test_list(docker_mock, client): 53 | """ 54 | GIVEN an HTTP call to /docker/list 55 | WHEN the results are returned 56 | THEN the results contain a list of running containers 57 | """ 58 | fake_container = mock.Mock() 59 | fake_container.name = 'fake_container' 60 | list_mock = mock.Mock(return_value=[fake_container], name='list_mock') 61 | docker_mock.containers.list = list_mock 62 | ret = client.get(url_for('docker.container_list')) 63 | assert ret.json == {'containers': ['fake_container']} 64 | 65 | @mock.patch('dyno.app.api.docker._normalize_name', return_value='fake_container_name') 66 | def test_query(fake_container_patch, docker_inspect, client): 67 | """ 68 | GIVEN an HTTP call to /docker/query 69 | WHEN the results are returned 70 | THEN the results contain info about the CPU and memory 71 | """ 72 | with mock.patch.object(dkr.low_client, 'inspect_container', return_value=docker_inspect): 73 | ret = client.get(url_for('docker.query'), query_string={'c': 'fake_container_name'}) 74 | assert ret.json['CPU'] == 1000 75 | assert ret.json['Mem'] == 200 76 | 77 | @mock.patch('dyno.app.api.docker.client', name='docker_mock') 78 | @mock.patch('dyno.app.api.docker._normalize_name', return_value='fake_container_name', name='normalize_mock') 79 | def test_update(fake_container_patch, docker_mock, client): 80 | """ 81 | GIVEN an HTTP call to /docker/update 82 | WHEN the call contains settings to be updated 83 | THEN the settings are updated 84 | """ 85 | fake_container = mock.Mock(name='fake_container') 86 | fake_container.name = 'fake_container' 87 | get_mock = mock.Mock(return_value=fake_container, name='get_mock') 88 | docker_mock.containers.get = get_mock 89 | client.get(url_for('docker.update'), query_string={'c': 'opbeans-python', 'component': 'CPU', 'val': 100}) 90 | 91 | fake_container.update.assert_called_with(cpu_quota=25990) 92 | 93 | # FIXME This is marked as xfail pending a centralization of the normalization functions 94 | @mark.xfail 95 | @mark.parametrize('val', range(1,101, 10)) 96 | @mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1,10]})) 97 | def test_normalize(val): 98 | """ 99 | GIVEN values between 1-100 100 | WHEN the
value is sent to be normalized 101 | THEN the correct normalized value is returned 102 | """ 103 | got = dkr._normalize_value('cpu', val) 104 | want = (101 - val) / 10 105 | assert got == want 106 | 107 | # FIXME This is marked as xfail pending a centralization of the normalization functions 108 | @mark.xfail 109 | @mark.parametrize('val', range(1,10)) 110 | @mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1,10]})) 111 | def test_denormalize(val): 112 | """ 113 | GIVEN values between 1-100 114 | WHEN the value is sent to be denormalized 115 | THEN the correct denormalized value is returned 116 | """ 117 | got = dkr._denormalize_value('cpu', val) 118 | want = 100 - (val * 10) 119 | assert got == want 120 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help 2 | SHELL := /bin/bash 3 | PYTHON ?= python3 4 | VENV ?= ./venv 5 | 6 | COMPOSE_ARGS ?= 7 | 8 | JUNIT_RESULTS_DIR=tests/results 9 | JUNIT_OPT=--junitxml $(JUNIT_RESULTS_DIR) 10 | 11 | CERT_VALID_DAYS ?= 3650 12 | 13 | APM_SERVER_URL ?= http://apm-server:8200 14 | ES_URL ?= http://elasticsearch:9200 15 | KIBANA_URL ?= http://kibana:5601 16 | 17 | ES_USER ?= admin 18 | ES_PASS ?= changeme 19 | ELASTIC_APM_SECRET_TOKEN ?= SuPeRsEcReT 20 | 21 | PYTHONHTTPSVERIFY ?= 1 22 | 23 | PYTEST_ARGS ?= 24 | 25 | # Make sure we run local versions of everything, particularly commands 26 | # installed into our virtualenv with pip, e.g. `docker-compose`. 27 | export PATH := ./bin:$(VENV)/bin:$(PATH) 28 | 29 | export APM_SERVER_URL := $(APM_SERVER_URL) 30 | export KIBANA_URL := $(KIBANA_URL) 31 | export ES_URL := $(ES_URL) 32 | export ES_USER := $(ES_USER) 33 | export ES_PASS := $(ES_PASS) 34 | export ELASTIC_APM_SECRET_TOKEN := $(ELASTIC_APM_SECRET_TOKEN) 35 | export PYTHONHTTPSVERIFY := $(PYTHONHTTPSVERIFY) 36 | 37 | help: ## Display this help text 38 | @grep -E '^[a-zA-Z_-]+[%]?:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' 39 | 40 | all: test 41 | 42 | # The tests are written in Python. Make a virtualenv to handle the dependencies.
43 | # make doesn't play nicely with custom VENV, intended only for CI usage 44 | venv: requirements.txt ## Prepare the virtual environment 45 | test -d $(VENV) || virtualenv -q --python=$(PYTHON) $(VENV);\ 46 | source $(VENV)/bin/activate || exit 1;\ 47 | pip install -q -r requirements.txt;\ 48 | touch $(VENV); 49 | 50 | lint: venv ## Lint the project 51 | source $(VENV)/bin/activate; \ 52 | flake8 --ignore=D100,D101,D102,D103,D104,D105,D106,D107,D200,D205,D400,D401,D403,W504 scripts/compose.py scripts/modules 53 | 54 | .PHONY: create-x509-cert 55 | create-x509-cert: ## Create an x509 certificate for use with the test suite 56 | openssl req -x509 -newkey rsa:4096 -keyout scripts/tls/key.pem -out scripts/tls/cert.crt -days "${CERT_VALID_DAYS}" -subj '/CN=apm-server' -nodes 57 | 58 | .PHONY: lint 59 | 60 | build-env: venv ## Build the test environment 61 | source $(VENV)/bin/activate; \ 62 | $(PYTHON) scripts/compose.py build $(COMPOSE_ARGS) 63 | docker-compose build --parallel 64 | 65 | start-env: venv ## Start the test environment 66 | source $(VENV)/bin/activate; \ 67 | $(PYTHON) scripts/compose.py start $(COMPOSE_ARGS) 68 | docker-compose up -d 69 | 70 | stop-env: venv ## Stop the test environment 71 | source $(VENV)/bin/activate; \ 72 | docker-compose down -v --remove-orphans || true 73 | 74 | destroy-env: venv ## Destroy the test environment 75 | [ -n "$$(docker ps -aqf network=apm-integration-testing)" ] && (docker ps -aqf network=apm-integration-testing | xargs -t docker rm -f && docker network rm apm-integration-testing) || true 76 | 77 | # default (all) built for now 78 | build-env-%: venv 79 | $(MAKE) build-env 80 | 81 | # default (all) started for now 82 | env-%: venv 83 | $(MAKE) start-env 84 | 85 | .PHONY: copy-events 86 | copy-events: 87 | docker cp $(shell docker-compose ps | grep intake-receiver | awk '{print $$1}'):/events . 88 | 89 | test: test-all test-helps ## Run all the tests 90 | 91 | test-compose: venv ## Test compose.py 92 | source $(VENV)/bin/activate; \ 93 | pytest $(PYTEST_ARGS) scripts/tests/test_*.py --reruns 3 --reruns-delay 5 -v -s $(JUNIT_OPT)/compose-junit.xml 94 | 95 | test-compose-2: 96 | virtualenv --python=python2.7 venv2 97 | ./venv2/bin/pip2 install mock pytest pyyaml 98 | ./venv2/bin/pytest $(PYTEST_ARGS) --noconftest scripts/tests/test_*.py 99 | 100 | SUBCOMMANDS = list-options load-dashboards start status stop upload-sourcemap versions 101 | 102 | test-helps: 103 | $(foreach subcommand,$(SUBCOMMANDS), $(PYTHON) scripts/compose.py $(subcommand) --help > /tmp/file-output && echo "Passed $(subcommand)" || { echo "Failed $(subcommand). See output: " ; cat /tmp/file-output ; exit 1; };) 104 | 105 | test-all: venv test-compose lint ## Run all the tests 106 | source $(VENV)/bin/activate; \ 107 | pytest -v -s $(PYTEST_ARGS) $(JUNIT_OPT)/all-junit.xml 108 | 109 | docker-test-%: ## Run a specific dockerized test. Ex: make docker-test-server 110 | TARGET=test-$* $(MAKE) dockerized-test 111 | 112 | dockerized-test: ## Run all the dockerized tests 113 | ./scripts/docker-summary.sh 114 | 115 | @echo running make $(TARGET) inside a container 116 | docker build --pull -t apm-integration-testing . 
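# The container run below joins the apm-integration-testing Docker network and
# receives the Elasticsearch/Kibana/APM Server URLs, credentials, and the
# OpenTelemetry exporter settings through the -e environment flags that follow.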
117 | 118 | mkdir -p -m 777 "$(PWD)/$(JUNIT_RESULTS_DIR)" 119 | chmod 777 "$(PWD)/$(JUNIT_RESULTS_DIR)" 120 | docker run \ 121 | --name=apm-integration-testing \ 122 | --network=apm-integration-testing \ 123 | --security-opt seccomp=unconfined \ 124 | -e APM_SERVER_URL \ 125 | -e ES_URL \ 126 | -e KIBANA_URL \ 127 | -e PYTHONDONTWRITEBYTECODE=1 \ 128 | -e PYTHONHTTPSVERIFY=$(PYTHONHTTPSVERIFY) \ 129 | -e ES_USER \ 130 | -e ES_PASS \ 131 | -e ELASTIC_APM_SECRET_TOKEN \ 132 | -e OTEL_EXPORTER_OTLP_ENDPOINT \ 133 | -e OTEL_EXPORTER_OTLP_HEADERS \ 134 | -e OTEL_SERVICE_NAME="apm-integration-testing" \ 135 | -e TRACEPARENT \ 136 | -e OTEL_EXPORTER_OTLP_INSECURE \ 137 | -v "$(PWD)/$(JUNIT_RESULTS_DIR)":"/app/$(JUNIT_RESULTS_DIR)" \ 138 | --rm \ 139 | --entrypoint make \ 140 | apm-integration-testing \ 141 | $(TARGET) 142 | 143 | @echo running make test-helps outside a container 144 | $(MAKE) test-helps 145 | 146 | .PHONY: test-% docker-test-% dockerized-test docker-compose-wait 147 | -------------------------------------------------------------------------------- /docker/filebeat/filebeat.6.x-compat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | setup.template.settings: 3 | index.number_of_shards: 1 4 | index.codec: best_compression 5 | index.number_of_replicas: 0 6 | 7 | setup.kibana: 8 | host: "${KIBANA_HOST:kibana:5601}" 9 | 10 | output.elasticsearch: 11 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 12 | username: '${ELASTICSEARCH_USERNAME:}' 13 | password: '${ELASTICSEARCH_PASSWORD:}' 14 | 15 | logging.json: true 16 | logging.metrics.enabled: false 17 | 18 | xpack.monitoring.enabled: true 19 | ################################################################################################### 20 | ## autodiscover 21 | ################################################################################################### 22 | filebeat.autodiscover: 23 | providers: 24 | - type: docker 25 | templates: 26 | - condition: 27 | contains: 28 | docker.container.image: "apm-server" 29 | config: 30 | - type: docker 31 | containers.ids: 32 | - "${data.docker.container.id}" 33 | fields_under_root: true 34 | json.keys_under_root: true 35 | json.overwrite_keys: true 36 | json.add_error_key: true 37 | json.message_key: message 38 | - condition: 39 | contains: 40 | docker.container.image: "filebeat" 41 | config: 42 | - type: docker 43 | containers.ids: 44 | - "${data.docker.container.id}" 45 | fields_under_root: true 46 | json.keys_under_root: true 47 | json.overwrite_keys: true 48 | json.add_error_key: true 49 | json.message_key: message 50 | - condition: 51 | contains: 52 | docker.container.image: "heartbeat" 53 | config: 54 | - type: docker 55 | containers.ids: 56 | - "${data.docker.container.id}" 57 | fields_under_root: true 58 | json.keys_under_root: true 59 | json.overwrite_keys: true 60 | json.add_error_key: true 61 | json.message_key: message 62 | - condition: 63 | contains: 64 | docker.container.image: "kibana" 65 | config: 66 | - type: docker 67 | containers.ids: 68 | - "${data.docker.container.id}" 69 | fields_under_root: true 70 | json.keys_under_root: true 71 | json.overwrite_keys: true 72 | json.add_error_key: true 73 | json.message_key: message 74 | - condition: 75 | contains: 76 | docker.container.image: "metricbeat" 77 | config: 78 | - type: docker 79 | containers.ids: 80 | - "${data.docker.container.id}" 81 | fields_under_root: true 82 | json.keys_under_root: true 83 | json.overwrite_keys: true 84 | json.add_error_key: true 85 | 
json.message_key: message 86 | - condition: 87 | contains: 88 | docker.container.image: "opbeans-node" 89 | config: 90 | - type: docker 91 | containers.ids: 92 | - "${data.docker.container.id}" 93 | multiline.pattern: '^ ' 94 | multiline.negate: false 95 | multiline.match: after 96 | - condition: 97 | contains: 98 | docker.container.image: "opbeans-go" 99 | config: 100 | - type: docker 101 | containers.ids: 102 | - "${data.docker.container.id}" 103 | fields_under_root: true 104 | json.keys_under_root: true 105 | json.overwrite_keys: true 106 | json.add_error_key: true 107 | json.message_key: message 108 | - condition: 109 | contains: 110 | docker.container.image: "postgres" 111 | config: 112 | - type: docker 113 | containers.ids: 114 | - "${data.docker.container.id}" 115 | multiline.pattern: '^\t' 116 | multiline.negate: false 117 | multiline.match: after 118 | - condition: 119 | and: 120 | - not: 121 | contains: 122 | docker.container.image: "apm-server" 123 | - not: 124 | contains: 125 | docker.container.image: "filebeat" 126 | - not: 127 | contains: 128 | docker.container.image: "heartbeat" 129 | - not: 130 | contains: 131 | docker.container.image: "kibana" 132 | - not: 133 | contains: 134 | docker.container.image: "metricbeat" 135 | - not: 136 | contains: 137 | docker.container.image: "opbeans-node" 138 | - not: 139 | contains: 140 | docker.container.image: "opbeans-go" 141 | - not: 142 | contains: 143 | docker.container.image: "postgres" 144 | config: 145 | - type: docker 146 | containers.ids: 147 | - "${data.docker.container.id}" 148 | -------------------------------------------------------------------------------- /QUICKSTART.md: -------------------------------------------------------------------------------- 1 | # APM LocalEnv Quickstart 2 | 3 | In addition to the end-to-end (e.g. agent -> apm server -> elasticsearch <- kibana) development and testing of Elastic APM, this repo also serves as a nice way to spin up a local environment of Elastic APM. The [README](/README.md) has very detailed instructions, but focuses mostly on using it for development. This doc really just concentrates on getting it running. For advanced topics, check out the main README. 4 | 5 | Note that the "local environment" can be on your actual local machine or on a cloud instance with port forwards set up. 6 | 7 | [![Build Status](https://apm-ci.elastic.co/view/All/job/elastic+apm-integration-testing+main+push/badge/icon?style=plastic)](https://apm-ci.elastic.co/job/elastic+apm-integration-testing+main+push/) 8 | 9 | ## Prerequisites 10 | 11 | The basic requirements for starting a local environment are: 12 | 13 | - Docker 14 | - Python (version 3 preferred) 15 | 16 | This repo is tested with Python 3 but best effort is made to make starting/stopping environments work with Python 2.7. 17 | 18 | ### Docker 19 | 20 | [Installation instructions](https://www.docker.com/community-edition) 21 | 22 | ### Python 3 23 | 24 | - Windows: [Installation instructions](https://www.python.org/downloads/windows/) 25 | - Mac (using [Homebrew](https://brew.sh/)): 26 | ```sh 27 | brew install python 28 | ``` 29 | - Debian/Ubuntu 30 | ```sh 31 | sudo apt-get install python3 32 | ``` 33 | 34 | ## Running Local Environments 35 | 36 | ### Starting an Environment 37 | 38 | The tool that we use to start and stop the environment is `./scripts/compose.py`. This provides a handy CLI for starting an APM environment using docker-compose.
39 | 40 | #### TL;DR 41 | 42 | Start an env by running: 43 | `./scripts/compose.py start --all 6.4 --release` 44 | 45 | This will start a complete 6.4 environment, which includes all of the sample apps and hits them each with a load generator. Once that is done (and everything has started up) you can navigate to [Your local Kibana Instance](http://localhost:5601/app/apm#/). 46 | 47 | #### Details 48 | 49 | If you don't want to start everything (for example, on a laptop with limited resources while trying to run Zoom at the same time) you can pick and choose which services you run. Say, for example, that you want to run node, java, and rum. You could use this command: 50 | ```console 51 | ./scripts/compose.py start \ 52 | --release \ 53 | --with-opbeans-node \ 54 | --with-opbeans-rum \ 55 | --with-opbeans-java \ 56 | 6.4 57 | ``` 58 | 59 | There are many other configuration options, but this is a quickstart. See the [README](/README.md). 60 | 61 | If you want to see what services are available to start, you can run: `./scripts/compose.py start --help | grep "^ --with-opbeans"` which will filter out a list of the agent envs: 62 | ```console 63 | --with-opbeans-dotnet Enable opbeans-dotnet 64 | --with-opbeans-go Enable opbeans-go 65 | --with-opbeans-java Enable opbeans-java 66 | --with-opbeans-node Enable opbeans-node 67 | --with-opbeans-python 68 | --with-opbeans-ruby Enable opbeans-ruby 69 | --with-opbeans-rum Enable opbeans-rum 70 | ``` 71 | So when new agents are added we don't have to update these instructions. 72 | 73 | 74 | **Bonus**: With either the `all` or individual methods above, you can also pass `--with-metricbeat` or `--with-filebeat` flags, which will also set up appropriate containers and dashboards. One side note here is that you will probably need to set a default index pattern. 75 | 76 | #### Status 77 | 78 | Each app gets its own port. You can actually hit them with your browser. They all have a similar look & feel. 79 | 80 | You can check the status of your APM cluster with `./scripts/compose.py status`, which basically calls: 81 | 82 | `docker ps --format 'table {{.Names}}\t{{.Ports}}'...` 83 | 84 | Here is a tabular view, excluding non-essentials: 85 | 86 | |Container Name | Link | 87 | |--------------------------------------------|----------------------------------------| 88 | |`localtesting_6.4.0_opbeans-rum` |[opbeans-rum](http://localhost:9222) (note - this needs Chrome) | 89 | |`localtesting_6.4.0_opbeans-java` |[opbeans-java](http://localhost:3002) | 90 | |`localtesting_6.4.0_opbeans-dotnet` |[opbeans-dotnet](http://localhost:3004) | 91 | |`localtesting_6.4.0_opbeans-go` |[opbeans-go](http://localhost:3003) | 92 | |`localtesting_6.4.0_opbeans-node` |[opbeans-node](http://localhost:3000) | 93 | |`localtesting_6.4.0_opbeans-ruby` |[opbeans-ruby](http://localhost:3001) | 94 | |`localtesting_6.4.0_opbeans-python` |[opbeans-python](http://localhost:8000) | 95 | |`localtesting_6.4.0_kibana` |[kibana](http://localhost:5601) | 96 | |`localtesting_6.4.0_elasticsearch` |[elasticsearch](http://localhost:9200) | 97 | |`localtesting_6.4.0_apm-server` |[APM Server](http://localhost:8200) | 98 | 99 | You can attach your own APM agent to the APM server if you wish. 100 | 101 | ### Note for Cloud Instances 102 | 103 | If you want to run this on a cloud server (GCP, AWS), you will need to set up port forwarding to access them, and the easiest way to do this is through your `~/.ssh/config` file.
My section for my cloud box looks like this: 104 | 105 | ``` 106 | Host gcptunnel 107 | HostName 108 | IdentityFile ~/.ssh/google_compute_engine <--- yours may differ 109 | User jamie <--- yours probably differs 110 | Compression yes 111 | ExitOnForwardFailure no 112 | LocalForward 3000 127.0.0.1:3000 113 | LocalForward 3001 127.0.0.1:3001 114 | LocalForward 3002 127.0.0.1:3002 115 | LocalForward 3003 127.0.0.1:3003 116 | LocalForward 3004 127.0.0.1:80 117 | LocalForward 5601 127.0.0.1:5601 118 | LocalForward 8000 127.0.0.1:8000 119 | LocalForward 9200 127.0.0.1:9200 120 | LocalForward 9222 127.0.0.1:9222 121 | ``` 122 | 123 | Then to start them up you just run `ssh gcptunnel`. 124 | 125 | ### Stopping an Environment 126 | 127 | All services: 128 | ``` 129 | ./scripts/compose.py stop 130 | 131 | # OR 132 | 133 | docker-compose down 134 | ``` 135 | -------------------------------------------------------------------------------- /scripts/modules/aux_services.py: -------------------------------------------------------------------------------- 1 | # 2 | # Supporting Services 3 | # 4 | 5 | 6 | from .helpers import curl_healthcheck, parse_version 7 | from .service import StackService, Service 8 | 9 | 10 | class Logstash(StackService, Service): 11 | SERVICE_PORT = 5044 12 | 13 | def build_candidate_manifest(self): 14 | version = self.version 15 | image = self.docker_name 16 | if self.oss: 17 | image += "-oss" 18 | if self.ubi8: 19 | image += "-ubi8" 20 | key = "{image}-{version}-docker-image.tar.gz".format( 21 | image=image, 22 | version=version, 23 | ) 24 | return self.bc["projects"]["logstash-docker"]["packages"][key] 25 | 26 | def _content(self): 27 | self.es_urls = ",".join(self.options.get( 28 | "logstash_elasticsearch_urls") or [self.DEFAULT_ELASTICSEARCH_HOSTS_NO_TLS]) 29 | if self.at_least_version("7.3") \ 30 | or self.options.get("apm_server_snapshot") \ 31 | or (not self.options.get("apm_server_version") is None and 32 | parse_version("7.3") <= parse_version(self.options.get("apm_server_version"))): 33 | volumes = ["./docker/logstash/pipeline/:/usr/share/logstash/pipeline/"] 34 | else: 35 | volumes = ["./docker/logstash/pipeline-6.x-compat/:/usr/share/logstash/pipeline/"] 36 | 37 | return dict( 38 | depends_on={"elasticsearch": {"condition": "service_healthy"}} if self.options.get( 39 | "enable_elasticsearch", True) else {}, 40 | environment={ 41 | "ELASTICSEARCH_URL": self.es_urls, 42 | }, 43 | healthcheck=curl_healthcheck(9600, "logstash", path="/"), 44 | ports=[self.publish_port(self.port, self.SERVICE_PORT), "9600"], 45 | volumes=volumes 46 | ) 47 | 48 | @classmethod 49 | def add_arguments(cls, parser): 50 | super(Logstash, cls).add_arguments(parser) 51 | parser.add_argument( 52 | "--logstash-elasticsearch-url", 53 | action="append", 54 | dest="logstash_elasticsearch_urls", 55 | help="logstash elasticsearch output url(s)." 
56 | ) 57 | 58 | 59 | class Kafka(Service): 60 | SERVICE_PORT = 9092 61 | 62 | def _content(self): 63 | return dict( 64 | depends_on=["zookeeper"], 65 | environment={ 66 | "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://kafka:9092", 67 | "KAFKA_BROKER_ID": 1, 68 | "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR": 1, 69 | "KAFKA_ZOOKEEPER_CONNECT": "zookeeper:2181", 70 | }, 71 | image="confluentinc/cp-kafka:4.1.3", 72 | labels=None, 73 | logging=None, 74 | ports=[self.publish_port(self.port, self.SERVICE_PORT)], 75 | ) 76 | 77 | 78 | class Postgres(Service): 79 | SERVICE_PORT = 5432 80 | opbeans_side_car = True 81 | 82 | def _content(self): 83 | return dict( 84 | environment=["POSTGRES_DB=opbeans", "POSTGRES_PASSWORD=verysecure"], 85 | healthcheck={"interval": "10s", "test": ["CMD", "pg_isready", "-h", "postgres", "-U", "postgres"]}, 86 | image="postgres:14", 87 | labels=None, 88 | ports=[self.publish_port(self.port, self.SERVICE_PORT, expose=True)], 89 | volumes=["./docker/opbeans/sql:/docker-entrypoint-initdb.d", "pgdata:/var/lib/postgresql/data"], 90 | ) 91 | 92 | 93 | class Redis(Service): 94 | SERVICE_PORT = 6379 95 | opbeans_side_car = True 96 | 97 | def _content(self): 98 | return dict( 99 | healthcheck={"interval": "10s", "test": ["CMD", "redis-cli", "ping"]}, 100 | image="redis:4", 101 | labels=None, 102 | command="--save ''", # disable persistence 103 | ports=[self.publish_port(self.port, self.SERVICE_PORT, expose=True)], 104 | ) 105 | 106 | 107 | class Zookeeper(Service): 108 | SERVICE_PORT = 2181 109 | 110 | def _content(self): 111 | return dict( 112 | environment={ 113 | "ZOOKEEPER_CLIENT_PORT": 2181, 114 | "ZOOKEEPER_TICK_TIME": 2000, 115 | }, 116 | image="confluentinc/cp-zookeeper:latest", 117 | labels=None, 118 | logging=None, 119 | ports=[self.publish_port(self.port, self.SERVICE_PORT)], 120 | ) 121 | 122 | 123 | class StatsD(Service): 124 | SERVICE_PORT = 8125 125 | 126 | def _content(self): 127 | return dict( 128 | build=dict( 129 | context="docker/statsd", 130 | dockerfile="Dockerfile", 131 | args=[] 132 | ), 133 | healthcheck={"interval": "10s", "test": ["CMD", "pidof", "node"]}, 134 | image=None, 135 | labels=None, 136 | ports=["8125:8125/udp", "8126:8126", "8127:8127"], 137 | ) 138 | 139 | 140 | class CommandService(object): 141 | def __init__(self, command, service="command", image="busybox", depends_on=None): 142 | self.command = command 143 | 144 | self.depends_on = depends_on 145 | self.image = image 146 | self.service = service 147 | 148 | def name(self): 149 | return self.service 150 | 151 | @staticmethod 152 | def image_download_url(): 153 | return None 154 | 155 | def render(self): 156 | content = { 157 | "command": self.command, 158 | "image": self.image, 159 | } 160 | if self.depends_on: 161 | content["depends_on"] = {d: {"condition": "service_healthy"} for d in self.depends_on} 162 | return {self.service: content} 163 | 164 | 165 | class WaitService(Service): 166 | """Create a service that depends on all services .""" 167 | 168 | def __init__(self, services, **options): 169 | super(WaitService, self).__init__(**options) 170 | self.services = services 171 | 172 | def _content(self): 173 | # Sorting is not relevant to docker-compose but is included here 174 | # to allow the tests to check for a consistently-ordered list 175 | for s in sorted(self.services, key=lambda x: x.name()): 176 | if s.name() != self.name() and s.name() != "opbeans-load-generator": 177 | self.depends_on[s.docker_service_name()] = {"condition": "service_healthy"} 178 | return dict( 179 | 
container_name="wait", 180 | image="busybox", 181 | depends_on=self.depends_on, 182 | labels=None, 183 | logging=None, 184 | ) 185 | -------------------------------------------------------------------------------- /docker/dyno/app/api/docker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Licensed to Elasticsearch B.V. under one or more contributor 4 | # license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright 6 | # ownership. Elasticsearch B.V. licenses this file to you under 7 | # the Apache License, Version 2.0 (the "License"); you may 8 | # not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations 18 | 19 | import os 20 | import docker 21 | import yaml 22 | from dyno import app 23 | from flask import request 24 | 25 | from flask import Blueprint 26 | 27 | bp = Blueprint('docker', __name__) 28 | 29 | 30 | client = docker.from_env() 31 | low_client = docker.APIClient() 32 | 33 | 34 | def _normalize_name(name): 35 | """ 36 | Small helper to find the container name. 37 | 38 | Pass in something like `elasticsearch` and have to 39 | find it in a list of container names that are like: 40 | localtesting_7.9.0_elasticsearch 41 | 42 | Parameters 43 | ---------- 44 | str : name 45 | The name to normalize 46 | 47 | Returns 48 | ------- 49 | str 50 | The normalized name 51 | """ 52 | containers = container_list() 53 | found = [] 54 | for container in containers['containers']: 55 | if container.split('_').pop().strip() == name: 56 | found.append(container) 57 | if len(found) > 1: 58 | raise Exception('Found more than one instance matching [{}]'.format(name)) 59 | if not found: 60 | raise Exception('Could not normalize [{}] because it was not found in the container list') 61 | return found[0] 62 | 63 | 64 | @bp.route('/list', methods=['GET']) 65 | def container_list(): 66 | """ 67 | Return list of containers 68 | 69 | Note 70 | ---- 71 | Exposed via HTTP at /api/docker/list 72 | 73 | Note 74 | ---- 75 | Paramaters are received query arguments in a Flask request object. They 76 | may not be passed directly to this function. 
77 | 78 | Returns 79 | ------- 80 | dict 81 | A dict with a key of 'containers' whose value is a list of 82 | the running containers 83 | 84 | Examples 85 | -------- 86 | ❯ curl -s http://localhost:9000/api/docker/list|jq 87 | { 88 | "containers": [ 89 | "localtesting_8.0.0_opbeans-load-generator", 90 | "localtesting_latest_opbeans-python", 91 | "localtesting_latest_opbeans-python01", 92 | "dyno", 93 | "localtesting_8.0.0_apm-server", 94 | "localtesting_8.0.0_kibana", 95 | "localtesting_8.0.0_elasticsearch", 96 | "localtesting_8.0.0_postgres", 97 | "localtesting_8.0.0_redis", 98 | "localtesting_8.0_toxi", 99 | "localtesting_8.0_stats-d" 100 | ] 101 | } 102 | """ 103 | ret = {'containers': []} 104 | containers = client.containers.list() 105 | for container in containers: 106 | ret['containers'].append(container.name) 107 | return ret 108 | 109 | 110 | @bp.route('/query', methods=['GET']) 111 | def query(): 112 | """ 113 | Inspect container and return information 114 | 115 | Note 116 | ---- 117 | Exposed via HTTP at /api/docker/query 118 | 119 | Note 120 | ---- 121 | Parameters are received as query arguments in a Flask request object. They 122 | may not be passed directly to this function. 123 | 124 | Parameters 125 | ---------- 126 | c : str 127 | Container to get 128 | 129 | Returns 130 | ------- 131 | dict 132 | A dictionary showing the configuration for the running container 133 | 134 | """ 135 | container = request.args.get('c') 136 | config = low_client.inspect_container(_normalize_name(container))['HostConfig'] 137 | """ 138 | cpu_quota (int) - Limit CPU CFS (Completely Fair Scheduler) quota 139 | -> HostConfig.CpuQuota 140 | blkio_weight (int) - Block IO (relative weight), between 10 and 1000 141 | -> HostConfig.BlkioWeight 142 | mem_limit (str) - Memory limit with units, such as 4m.
153 |
154 | @bp.route('/update', methods=['GET'])
155 | def update():
156 | """
157 | Update a container setting
158 |
159 | We take the following required query args:
160 | c: (str) The container name
161 | component: (str) One of 'cpu', 'io' or 'mem'; val: (int) a slider value in the range 0-100
162 | """
163 | c = _normalize_name(request.args.get('c'))
164 | component = request.args.get('component')
165 | val = int(request.args.get('val'))
166 | config = {
167 | 'container': c,
168 | 'settings': {}
169 | }
170 | c = component.lower()
171 | if c == 'cpu':
172 | config['settings']['cpu_quota'] = _normalize_value(c, val)
173 | if c == 'io':
174 | config['settings']['blkio_weight'] = _normalize_value(c, val)
175 | if c == 'mem':
176 | config['settings']['mem_limit'] = str(_normalize_value(c, val)) + "m"
177 | config['settings']['memswap_limit'] = -1
178 | c = client.containers.get(config['container'])
179 | c.update(**config['settings'])
180 | return {}
181 |
182 | def _range():
183 | range_path = os.path.join(app.app.root_path, 'range.yml')
184 | with open(range_path, 'r') as fh_:
185 | slider_range = yaml.load(fh_, Loader=yaml.FullLoader)
186 | return slider_range
187 |
188 | def _denormalize_value(code, val):
189 | """
190 | Take a current container value and return its percentage (0-100) position on the slider
191 | """
192 | slider_range = _range()
193 | lval, uval = slider_range[code]
194 | ret = ((val - min([lval, uval])) / (max([lval, uval]) - min([lval, uval]))) * 100
195 | return int(ret)
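# A worked example of the linear mapping implemented by the two adjacent
# helpers. The range is hypothetical; actual bounds come from app/range.yml:
#
#   with a range of [0, 2000000] for 'cpu':
#     _normalize_value('cpu', 50)         # (50 * (2000000 - 0)) / 100 + 0 -> 1000000
#     _denormalize_value('cpu', 1000000)  # ((1000000 - 0) / 2000000) * 100 -> 50
#
# i.e. the two functions are inverses of one another over the configured range.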
196 |
197 | def _normalize_value(code, val):
198 | """
199 | This uses the range.yml configuration file which populates
200 | a set of values to determine the upper and lower range.
201 | We take our input value from the web interface to this function,
202 | which is in the range of 0-100, and turn that into an actual
203 | value to apply to the container
204 | """
205 | slider_range = _range()
206 |
207 | lval, uval = slider_range[code]
208 |
209 | ret = ((val * (max([lval, uval]) - min([lval, uval]))) / 100) + min([lval, uval])
210 | return int(ret)
211 |
--------------------------------------------------------------------------------
/docker/filebeat/filebeat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | setup.template.settings:
3 | index.number_of_shards: 1
4 | index.codec: best_compression
5 | index.number_of_replicas: 0
6 |
7 | setup.kibana:
8 | host: "${KIBANA_HOST:kibana:5601}"
9 |
10 | output.elasticsearch:
11 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
12 | username: '${ELASTICSEARCH_USERNAME:}'
13 | password: '${ELASTICSEARCH_PASSWORD:}'
14 |
15 | logging.json: true
16 | logging.metrics.enabled: false
17 |
18 | monitoring.enabled: true
19 |
20 | processors:
21 | - add_host_metadata: ~
22 | - add_cloud_metadata: ~
23 | - add_docker_metadata: ~
24 | - add_kubernetes_metadata: ~
25 | ###################################################################################################
26 | ## autodiscover
27 | ###################################################################################################
28 | filebeat.autodiscover:
29 | providers:
30 | - type: docker
31 | templates:
32 | - condition:
33 | contains:
34 | docker.container.name: "opbeans-"
35 | config:
36 | - type: container
37 | paths:
38 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log"
39 | include_lines: ['^{']
40 | tail_files: true
41 | processors:
42 | - add_tags:
43 | tags: [json]
44 | target: "parser_type"
45 | - decode_json_fields:
46 | fields:
47 | - message
48 | target: ""
49 | overwrite_keys: true
50 | add_error_key: true
51 | - drop_fields:
52 | fields:
53 | - service
54 | - event
55 | - url
56 | - error
57 | fields_under_root: true
58 | - condition:
59 | contains:
60 | docker.container.name: "opbeans-"
61 | config:
62 | - type: container
63 | paths:
64 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log"
65 | tail_files: true
66 | multiline.pattern: '^[[:blank:]]'
67 | multiline.negate: false
68 | multiline.match: after
69 | exclude_lines: ['^{']
70 | processors:
71 | - add_tags:
72 | tags: [no_json]
73 | target: "parser_type"
74 | - condition:
75 | contains:
76 | docker.container.name: "kibana"
77 | config:
78 | - type: container
79 | paths:
80 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log"
81 | tail_files: true
82 | json.add_error_key: true
83 | json.overwrite_keys: true
84 | json.keys_under_root: true
85 | - condition:
86 | contains:
87 | docker.container.name: "elasticsearch"
88 | config:
89 | - type: container
90 | paths:
91 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log"
92 | tail_files: true
93 | json.add_error_key: true
94 | json.overwrite_keys: true
95 | json.keys_under_root: true
96 | - condition:
97 | contains:
98 | docker.container.name: "metricbeat"
99 | config:
100 | - type: container
101 | paths:
102 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log"
103 | tail_files: true
104 | json.add_error_key: true
105 | json.overwrite_keys: true
106 | json.keys_under_root: true
107 | - condition:
108 | contains:
109 | docker.container.name: "heartbeat"
110 | config:
111 | - type: container
112 | paths:
113 | -
"/var/lib/docker/containers/*/${data.docker.container.id}-json.log" 114 | tail_files: true 115 | json.add_error_key: true 116 | json.overwrite_keys: true 117 | json.keys_under_root: true 118 | - condition: 119 | contains: 120 | docker.container.name: "filebeat" 121 | config: 122 | - type: container 123 | paths: 124 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log" 125 | tail_files: true 126 | json.add_error_key: true 127 | json.overwrite_keys: true 128 | json.keys_under_root: true 129 | - condition: 130 | contains: 131 | docker.container.name: "apm-server" 132 | config: 133 | - type: container 134 | paths: 135 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log" 136 | tail_files: true 137 | json.add_error_key: true 138 | json.overwrite_keys: true 139 | json.keys_under_root: true 140 | processors: 141 | - rename: 142 | fields: 143 | - from: "error" 144 | to: "error_apm_server" 145 | ignore_missing: false 146 | fail_on_error: true 147 | - condition: 148 | contains: 149 | docker.container.name: "postgres" 150 | config: 151 | - type: container 152 | paths: 153 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log" 154 | tail_files: true 155 | multiline.pattern: '^\t' 156 | multiline.negate: false 157 | multiline.match: after 158 | - condition: 159 | and: 160 | - not: 161 | contains: 162 | docker.container.name: "apm-server" 163 | - not: 164 | contains: 165 | docker.container.name: "filebeat" 166 | - not: 167 | contains: 168 | docker.container.name: "heartbeat" 169 | - not: 170 | contains: 171 | docker.container.name: "kibana" 172 | - not: 173 | contains: 174 | docker.container.name: "metricbeat" 175 | - not: 176 | contains: 177 | docker.container.name: "opbeans-" 178 | - not: 179 | contains: 180 | docker.container.name: "postgres" 181 | config: 182 | - type: container 183 | paths: 184 | - "/var/lib/docker/containers/*/${data.docker.container.id}-json.log" 185 | tail_files: true 186 | 187 | http.enabled: true 188 | http.host: localhost 189 | http.port: 5066 190 | -------------------------------------------------------------------------------- /scripts/tests/config/test_start_main_default.yml: -------------------------------------------------------------------------------- 1 | --- 2 | networks: 3 | default: 4 | name: apm-integration-testing 5 | services: 6 | apm-server: 7 | cap_add: 8 | - CHOWN 9 | - DAC_OVERRIDE 10 | - SETGID 11 | - SETUID 12 | cap_drop: 13 | - ALL 14 | command: 15 | - apm-server 16 | - -e 17 | - --httpprof 18 | - :6060 19 | - -E 20 | - apm-server.rum.enabled=true 21 | - -E 22 | - apm-server.rum.event_rate.limit=1000 23 | - -E 24 | - apm-server.host=0.0.0.0:8200 25 | - -E 26 | - apm-server.read_timeout=1m 27 | - -E 28 | - apm-server.shutdown_timeout=2m 29 | - -E 30 | - apm-server.write_timeout=1m 31 | - -E 32 | - logging.json=true 33 | - -E 34 | - logging.metrics.enabled=false 35 | - -E 36 | - monitoring.elasticsearch=true 37 | - -E 38 | - monitoring.enabled=true 39 | - -E 40 | - apm-server.rum.allow_headers=["x-custom-header"] 41 | - -E 42 | - apm-server.kibana.enabled=true 43 | - -E 44 | - apm-server.kibana.host=kibana:5601 45 | - -E 46 | - apm-server.agent.config.cache.expiration=30s 47 | - -E 48 | - apm-server.kibana.username=apm_server_user 49 | - -E 50 | - apm-server.kibana.password=changeme 51 | - -E 52 | - output.elasticsearch.hosts=["http://elasticsearch:9200"] 53 | - -E 54 | - output.elasticsearch.username=apm_server_user 55 | - -E 56 | - output.elasticsearch.password=changeme 57 | - -E 58 | - 
output.elasticsearch.enabled=true 59 | container_name: localtesting_7.17.0_apm-server 60 | depends_on: 61 | elasticsearch: 62 | condition: service_healthy 63 | kibana: 64 | condition: service_healthy 65 | environment: 66 | - BEAT_STRICT_PERMS=false 67 | healthcheck: 68 | interval: 10s 69 | retries: 12 70 | test: 71 | - CMD 72 | - curl 73 | - --write-out 74 | - "'HTTP %{http_code}'" 75 | - -k 76 | - --fail 77 | - --silent 78 | - --output 79 | - /dev/null 80 | - http://localhost:8200/ 81 | timeout: 5s 82 | image: docker.elastic.co/apm/apm-server:7.17.0-SNAPSHOT 83 | labels: 84 | - co.elastic.apm.stack-version=7.17.0 85 | logging: 86 | driver: json-file 87 | options: 88 | max-file: "5" 89 | max-size: 2m 90 | ports: 91 | - 127.0.0.1:8200:8200 92 | - 127.0.0.1:6060:6060 93 | elasticsearch: 94 | container_name: localtesting_7.17.0_elasticsearch 95 | environment: 96 | - bootstrap.memory_lock=true 97 | - cluster.name=docker-cluster 98 | - cluster.routing.allocation.disk.threshold_enabled=false 99 | - discovery.type=single-node 100 | - path.repo=/usr/share/elasticsearch/data/backups 101 | - ES_JAVA_OPTS=-XX:UseAVX=2 -Xms1g -Xmx1g 102 | - path.data=/usr/share/elasticsearch/data/7.17.0 103 | - xpack.security.authc.anonymous.roles=remote_monitoring_collector 104 | - xpack.security.authc.realms.file.file1.order=0 105 | - xpack.security.authc.realms.native.native1.order=1 106 | - xpack.security.authc.token.enabled=true 107 | - xpack.security.authc.api_key.enabled=true 108 | - xpack.security.enabled=true 109 | - xpack.license.self_generated.type=trial 110 | - xpack.monitoring.collection.enabled=true 111 | healthcheck: 112 | interval: 20s 113 | retries: 10 114 | test: 115 | - CMD-SHELL 116 | - curl -s -k http://localhost:9200/_cluster/health | grep -vq '"status":"red"' 117 | image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-SNAPSHOT 118 | labels: 119 | - co.elastic.apm.stack-version=7.17.0 120 | - co.elastic.metrics/module=elasticsearch 121 | - co.elastic.metrics/metricsets=node,node_stats 122 | - co.elastic.metrics/hosts=http://$${data.host}:9200 123 | logging: 124 | driver: json-file 125 | options: 126 | max-file: "5" 127 | max-size: 2m 128 | ports: 129 | - 127.0.0.1:9200:9200 130 | ulimits: 131 | memlock: 132 | hard: -1 133 | soft: -1 134 | volumes: 135 | - esdata:/usr/share/elasticsearch/data 136 | - ./docker/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml 137 | - ./docker/elasticsearch/users:/usr/share/elasticsearch/config/users 138 | - ./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles 139 | - ./docker/elasticsearch/service_tokens:/usr/share/elasticsearch/config/service_tokens 140 | fleet_setup: 141 | command: 142 | - curl 143 | - -X 144 | - POST 145 | - -H 146 | - "kbn-xsrf: 1" 147 | - http://admin:changeme@kibana:5601/api/fleet/setup 148 | depends_on: 149 | kibana: 150 | condition: service_healthy 151 | image: docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT 152 | kibana: 153 | container_name: localtesting_7.17.0_kibana 154 | depends_on: 155 | elasticsearch: 156 | condition: service_healthy 157 | environment: 158 | ELASTICSEARCH_HOSTS: http://elasticsearch:9200 159 | ELASTICSEARCH_PASSWORD: changeme 160 | ELASTICSEARCH_USERNAME: kibana_system_user 161 | SERVER_HOST: 0.0.0.0 162 | SERVER_NAME: kibana.example.org 163 | STATUS_ALLOWANONYMOUS: "true" 164 | TELEMETRY_ENABLED: "false" 165 | XPACK_APM_SERVICEMAPENABLED: "true" 166 | XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr 167 | 
XPACK_FLEET_AGENTS_ELASTICSEARCH_HOSTS: '["http://elasticsearch:9200"]' 168 | XPACK_FLEET_REGISTRYURL: https://epr-snapshot.elastic.co 169 | XPACK_MONITORING_ENABLED: "true" 170 | XPACK_REPORTING_ROLES_ENABLED: "false" 171 | XPACK_SECURITY_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr 172 | XPACK_SECURITY_LOGINASSISTANCEMESSAGE: Login details: `admin/changeme`. Further details [here](https://github.com/elastic/apm-integration-testing#logging-in). 173 | XPACK_SECURITY_SESSION_IDLETIMEOUT: 1M 174 | XPACK_SECURITY_SESSION_LIFESPAN: 3M 175 | XPACK_XPACK_MAIN_TELEMETRY_ENABLED: "false" 176 | healthcheck: 177 | interval: 10s 178 | retries: 30 179 | start_period: 10s 180 | test: 181 | - CMD-SHELL 182 | - curl -s -k http://kibana:5601/api/status | grep -q 'Looking good' 183 | image: docker.elastic.co/kibana/kibana:7.17.0-SNAPSHOT 184 | labels: 185 | - co.elastic.apm.stack-version=7.17.0 186 | logging: 187 | driver: json-file 188 | options: 189 | max-file: "5" 190 | max-size: 2m 191 | ports: 192 | - 127.0.0.1:5601:5601 193 | wait-service: 194 | container_name: wait 195 | depends_on: 196 | apm-server: 197 | condition: service_healthy 198 | elasticsearch: 199 | condition: service_healthy 200 | kibana: 201 | condition: service_healthy 202 | image: busybox 203 | version: "2.4" 204 | volumes: 205 | esdata: 206 | driver: local 207 | pgdata: 208 | driver: local 209 | -------------------------------------------------------------------------------- /docker/dyno/tests/files/docker_inspect.yml: -------------------------------------------------------------------------------- 1 | AppArmorProfile: '' 2 | Args: 3 | - honcho 4 | - start 5 | Config: 6 | AttachStderr: false 7 | AttachStdin: false 8 | AttachStdout: false 9 | Cmd: 10 | - honcho 11 | - start 12 | Domainname: '' 13 | Entrypoint: 14 | - /app/entrypoint.sh 15 | Env: 16 | - WS=1 17 | - OPBEANS_URLS=opbeans-python:http://opbeans-python:3000 18 | - OPBEANS_RPMS=opbeans-python:100 19 | - PATH=/app/venv/bin:/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 20 | - LANG=C.UTF-8 21 | - GPG_KEY=0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D 22 | - PYTHON_VERSION=3.7.9 23 | - PYTHON_PIP_VERSION=20.3.3 24 | - PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/5f38681f7f5872e4032860b54e9cc11cf0374932/get-pip.py 25 | - PYTHON_GET_PIP_SHA256=6a0b13826862f33c13b614a921d36253bfa1ae779c5fbf569876f3585057e9d2 26 | - PYTHONUNBUFFERED=1 27 | ExposedPorts: 28 | 8000/tcp: {} 29 | Hostname: 7d917f922c4f 30 | Image: lg 31 | Labels: 32 | com.docker.compose.config-hash: 86111e599c962ffda85de1fd7770d4f949e5555652282a165da4347224e24b66 33 | com.docker.compose.container-number: '1' 34 | com.docker.compose.oneoff: 'False' 35 | com.docker.compose.project: apm-integration-testing 36 | com.docker.compose.project.config_files: /Users/mp/devel/apm-integration-testing/docker-compose.yml 37 | com.docker.compose.project.working_dir: /Users/mp/devel/apm-integration-testing 38 | com.docker.compose.service: opbeans-load-generator 39 | com.docker.compose.version: 1.27.4 40 | OnBuild: null 41 | OpenStdin: false 42 | StdinOnce: false 43 | Tty: false 44 | User: '' 45 | Volumes: null 46 | WorkingDir: /app 47 | Created: '2021-01-04T16:24:11.1589578Z' 48 | Driver: overlay2 49 | ExecIDs: null 50 | GraphDriver: 51 | Data: 52 | LowerDir: 
/var/lib/docker/overlay2/d4e3394374447deb1a05dff37475e1d886b2b6d6c6a2b7757888fe960400270b-init/diff:/var/lib/docker/overlay2/ffb7b150eec8b766215e811bfd0431c848afd562ff252698a8706a481a57396f/diff:/var/lib/docker/overlay2/5347dfe3099b25192613d749ed745faa062ab8ff5e8381e0a3641629726e370b/diff:/var/lib/docker/overlay2/948f4e1a6ad7d5609ae1844d4e8b15a84c599778158813a86cde801af757bc27/diff:/var/lib/docker/overlay2/6ed8629940031876cf37fa8490ce68c20657a53fbbecce591c7a3a39c67019fc/diff:/var/lib/docker/overlay2/fa93947244a7b76e5fcb9040b03d6aeca29367eda3fce4dc1b46221bb61c0825/diff:/var/lib/docker/overlay2/96b581b541eab5ad027d76411082d4190ac1645bfed4b6e7f55300f7d4ff5c63/diff:/var/lib/docker/overlay2/7c79b01d96ca23555d847ace9d8c8e80d5c49644003d885ec697bae1df0964b6/diff 53 | MergedDir: /var/lib/docker/overlay2/d4e3394374447deb1a05dff37475e1d886b2b6d6c6a2b7757888fe960400270b/merged 54 | UpperDir: /var/lib/docker/overlay2/d4e3394374447deb1a05dff37475e1d886b2b6d6c6a2b7757888fe960400270b/diff 55 | WorkDir: /var/lib/docker/overlay2/d4e3394374447deb1a05dff37475e1d886b2b6d6c6a2b7757888fe960400270b/work 56 | Name: overlay2 57 | HostConfig: 58 | AutoRemove: false 59 | Binds: [] 60 | BlkioDeviceReadBps: null 61 | BlkioDeviceReadIOps: null 62 | BlkioDeviceWriteBps: null 63 | BlkioDeviceWriteIOps: null 64 | BlkioWeight: 0 65 | BlkioWeightDevice: null 66 | CapAdd: null 67 | CapDrop: null 68 | Capabilities: null 69 | Cgroup: '' 70 | CgroupParent: '' 71 | ConsoleSize: 72 | - 0 73 | - 0 74 | ContainerIDFile: '' 75 | CpuCount: 0 76 | CpuPercent: 0 77 | CpuPeriod: 0 78 | # CpuQuota modified specifically for use as test value 79 | CpuQuota: 1000000 80 | CpuRealtimePeriod: 0 81 | CpuRealtimeRuntime: 0 82 | CpuShares: 0 83 | CpusetCpus: '' 84 | CpusetMems: '' 85 | DeviceCgroupRules: null 86 | DeviceRequests: null 87 | Devices: null 88 | Dns: null 89 | DnsOptions: null 90 | DnsSearch: null 91 | ExtraHosts: null 92 | GroupAdd: null 93 | IOMaximumBandwidth: 0 94 | IOMaximumIOps: 0 95 | IpcMode: private 96 | Isolation: '' 97 | KernelMemory: 0 98 | KernelMemoryTCP: 0 99 | Links: null 100 | LogConfig: 101 | Config: 102 | max-file: '5' 103 | max-size: 2m 104 | Type: json-file 105 | MaskedPaths: 106 | - /proc/asound 107 | - /proc/acpi 108 | - /proc/kcore 109 | - /proc/keys 110 | - /proc/latency_stats 111 | - /proc/timer_list 112 | - /proc/timer_stats 113 | - /proc/sched_debug 114 | - /proc/scsi 115 | - /sys/firmware 116 | # Memory modified specifically for use as test val 117 | Memory: 1000 118 | MemoryReservation: 0 119 | MemorySwap: 0 120 | MemorySwappiness: null 121 | NanoCpus: 0 122 | NetworkMode: apm-integration-testing 123 | OomKillDisable: false 124 | OomScoreAdj: 0 125 | PidMode: '' 126 | PidsLimit: null 127 | PortBindings: 128 | 8000/tcp: 129 | - HostIp: '' 130 | HostPort: '8999' 131 | Privileged: false 132 | PublishAllPorts: false 133 | ReadonlyPaths: 134 | - /proc/bus 135 | - /proc/fs 136 | - /proc/irq 137 | - /proc/sys 138 | - /proc/sysrq-trigger 139 | ReadonlyRootfs: false 140 | RestartPolicy: 141 | MaximumRetryCount: 0 142 | Name: '' 143 | Runtime: runc 144 | SecurityOpt: null 145 | ShmSize: 67108864 146 | UTSMode: '' 147 | Ulimits: null 148 | UsernsMode: '' 149 | VolumeDriver: '' 150 | VolumesFrom: [] 151 | HostnamePath: /var/lib/docker/containers/7d917f922c4f5a4d041fa49d579c153b63786cd85e489c18c15540c413141289/hostname 152 | HostsPath: /var/lib/docker/containers/7d917f922c4f5a4d041fa49d579c153b63786cd85e489c18c15540c413141289/hosts 153 | Id: 7d917f922c4f5a4d041fa49d579c153b63786cd85e489c18c15540c413141289 154 | Image: 
sha256:e5989de1c5f32fadcc3acfd47683cec920d3df896802c0d1c0aaee1db67d008e 155 | LogPath: /var/lib/docker/containers/7d917f922c4f5a4d041fa49d579c153b63786cd85e489c18c15540c413141289/7d917f922c4f5a4d041fa49d579c153b63786cd85e489c18c15540c413141289-json.log 156 | MountLabel: '' 157 | Mounts: [] 158 | Name: /localtesting_7.9.0_opbeans-load-generator 159 | NetworkSettings: 160 | Bridge: '' 161 | EndpointID: '' 162 | Gateway: '' 163 | GlobalIPv6Address: '' 164 | GlobalIPv6PrefixLen: 0 165 | HairpinMode: false 166 | IPAddress: '' 167 | IPPrefixLen: 0 168 | IPv6Gateway: '' 169 | LinkLocalIPv6Address: '' 170 | LinkLocalIPv6PrefixLen: 0 171 | MacAddress: '' 172 | Networks: 173 | apm-integration-testing: 174 | Aliases: 175 | - opbeans-load-generator 176 | - 7d917f922c4f 177 | DriverOpts: null 178 | EndpointID: 67b74979d669210db55719d8bc2ba84b71aa6c2f1a9b112110af62341c68113a 179 | Gateway: 172.18.0.1 180 | GlobalIPv6Address: '' 181 | GlobalIPv6PrefixLen: 0 182 | IPAMConfig: null 183 | IPAddress: 172.18.0.11 184 | IPPrefixLen: 16 185 | IPv6Gateway: '' 186 | Links: null 187 | MacAddress: 02:42:ac:12:00:0b 188 | NetworkID: 30df752494af36cbda221354d195f8ccb505974d248fa291da932cac143316e9 189 | Ports: 190 | 8000/tcp: 191 | - HostIp: 0.0.0.0 192 | HostPort: '8999' 193 | SandboxID: e3f238b94d0b00fb7c2c2bd58c5a9dde85b6bf18235b4b78b3cf9efee1d745d7 194 | SandboxKey: /var/run/docker/netns/e3f238b94d0b 195 | SecondaryIPAddresses: null 196 | SecondaryIPv6Addresses: null 197 | Path: /app/entrypoint.sh 198 | Platform: linux 199 | ProcessLabel: '' 200 | ResolvConfPath: /var/lib/docker/containers/7d917f922c4f5a4d041fa49d579c153b63786cd85e489c18c15540c413141289/resolv.conf 201 | RestartCount: 0 202 | State: 203 | Dead: false 204 | Error: '' 205 | ExitCode: 0 206 | FinishedAt: '0001-01-01T00:00:00Z' 207 | OOMKilled: false 208 | Paused: false 209 | Pid: 10361 210 | Restarting: false 211 | Running: true 212 | StartedAt: '2021-01-04T16:24:11.7255039Z' 213 | Status: running 214 | -------------------------------------------------------------------------------- /docker/dyno/tests/unit/test_control.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Licensed to Elasticsearch B.V. under one or more contributor 4 | # license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright 6 | # ownership. Elasticsearch B.V. licenses this file to you under 7 | # the Apache License, Version 2.0 (the "License"); you may 8 | # not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. 
See the License for the
17 | # specific language governing permissions and limitations under the License.
18 |
19 | """
20 | Tests for the Opbeans Dyno
21 | """
22 | import toxiproxy
23 | from pytest import mark
24 | from unittest import mock
25 | from flask import url_for
26 | import dyno.app.api.control as ctl
27 |
28 | @mock.patch('toxiproxy.server.Toxiproxy.update_api_consumer')
29 | def test_fetch_proxy_update_consumer(consumer_patch, toxi_default_environment):
30 | """
31 | GIVEN an environment with TOXI_HOST and TOXI_PORT set
32 | WHEN the _fetch_proxy() helper function is called
33 | THEN the proxy api consumer is updated
34 | """
35 | ctl._fetch_proxy()
36 | consumer_patch.assert_called_once()
37 |
38 |
39 | @mark.parametrize('toxi_env', ['TOXI_HOST', 'TOXI_PORT'])
40 | @mock.patch('toxiproxy.server.Toxiproxy.update_api_consumer')
41 | def test_fetch_proxy_no_update_consumer(consumer_patch, toxi_default_environment, toxi_env, monkeypatch):
42 | """
43 | GIVEN an environment missing either TOXI_HOST or TOXI_PORT
44 | WHEN the _fetch_proxy() helper function is called
45 | THEN the proxy api consumer is *not* updated
46 | """
47 | monkeypatch.delenv(toxi_env)
48 | ctl._fetch_proxy()
49 | consumer_patch.assert_not_called()
50 |
51 | @mark.parametrize('toxi_code', ctl.toxic_map.keys())
52 | def test_decode_toxi(toxi_code):
53 | """
54 | GIVEN a shortened toxic code
55 | WHEN the code is given to the _decode_toxic() function
56 | THEN it receives back a dictionary describing the toxic
57 | """
58 | assert ctl._decode_toxic(toxi_code)
59 |
60 | @mark.parametrize('toxi_cfg', ctl.toxic_map.values())
61 | def test_encode_toxi(toxi_cfg):
62 | """
63 | GIVEN a toxic configuration
64 | WHEN that configuration is passed to the _encode_toxic() function
65 | THEN the code for that configuration is returned
66 | """
67 | assert ctl._encode_toxic(toxi_cfg['type'], toxi_cfg['attr'])
68 |
69 | def test_get_app(fetch_proxy_mock, client):
70 | """
71 | GIVEN an HTTP client
72 | WHEN that client requests the /app endpoint
73 | THEN the client receives a dictionary containing the app proxy config
74 | """
75 | with mock.patch('dyno.app.api.control._fetch_proxy', fetch_proxy_mock):
76 | res = client.get(url_for('api.fetch_app'), query_string={'name': 'fake_proxy'})
77 | assert res.json == {
78 | 'enabled': True,
79 | 'listen': 8080,
80 | 'name': 'opbeans-proxy',
81 | 'toxics': {},
82 | 'upstream': 'fake_upstream'
83 | }
84 |
85 | def test_get_apps(fetch_proxy_mock, client):
86 | """
87 | GIVEN an HTTP client
88 | WHEN that client requests the /apps endpoint
89 | THEN the client receives a dictionary containing a list of configured apps
90 | """
91 | with mock.patch('dyno.app.api.control._fetch_proxy', fetch_proxy_mock):
92 | res = client.get(url_for('api.fetch_all_apps'), query_string={'name': 'fake_proxy'})
93 | assert res.json == {'proxies': ['fake_proxy']}
94 |
95 | def test_get_apps_full(fetch_proxy_mock, client):
96 | """
97 | GIVEN an HTTP client
98 | WHEN that client requests the /apps endpoint with the `full` argument supplied
99 | THEN the client receives a dictionary back with all apps and their configurations
100 | """
101 | with mock.patch('dyno.app.api.control._fetch_proxy', fetch_proxy_mock):
102 | res = client.get(
103 | url_for('api.fetch_all_apps'),
104 | query_string={'name': 'fake_proxy', 'full': True}
105 | )
106 | assert res.json == {'proxies': [{'listen': 8080, 'name': 'opbeans-proxy'}]}
107 |
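# The enable/disable/slide tests below share one wiring pattern: build a
# Toxiproxy mock whose get_proxy() returns a Proxy mock, patch _fetch_proxy()
# to return it, drive the endpoint, then assert on the Proxy mock. A minimal
# sketch of that pattern (the asserted method is illustrative):
#
#   t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
#   proxy_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
#   t_.attach_mock(mock.Mock(return_value=proxy_mock), 'get_proxy')
#   with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
#       client.get(url_for('api.enable_proxy'))
#   proxy_mock.enable.assert_called()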
108 | def test_enable(client):
109 | """
110 | GIVEN an HTTP client
111 | WHEN that client requests the /enable endpoint to enable a given proxy
112 | THEN the toxiproxy API is instructed to enable the proxy
113 | """
114 | t_ = mock.Mock(spec=toxiproxy.Toxiproxy, name='toxi_mock')
115 | enable_mock = mock.Mock(spec=toxiproxy.proxy.Proxy, name='enable_mock')
116 | t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=enable_mock), 'get_proxy')
117 | with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
118 | with mock.patch('toxiproxy.proxy.Proxy', enable_mock):
119 | client.get(url_for('api.enable_proxy'))
120 | enable_mock.enable.assert_called()
121 |
122 | def test_disable(client):
123 | """
124 | GIVEN an HTTP client
125 | WHEN that client requests the /disable endpoint to disable a given proxy
126 | THEN the toxiproxy API is instructed to disable the proxy
127 | """
128 | t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
129 | disable_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
130 | t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=disable_mock), 'get_proxy')
131 | with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
132 | with mock.patch('toxiproxy.proxy.Proxy', disable_mock):
133 | client.get(url_for('api.disable_proxy'))
134 | disable_mock.disable.assert_called()
135 |
136 | @mark.parametrize('toxi_code', ctl.toxic_map.keys())
137 | def test_slide(toxi_code, client):
138 | """
139 | GIVEN an HTTP client
140 | WHEN that client hits the /slider endpoint to adjust values for a proxy
141 | THEN the proxy values are adjusted
142 | """
143 | t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
144 | proxy_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
145 | t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=proxy_mock), 'get_proxy')
146 | with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
147 | with mock.patch('toxiproxy.proxy.Proxy', proxy_mock):
148 | client.post(url_for('api.slide'), json={'tox_code': toxi_code, 'val': 100})
149 | proxy_mock.add_toxic.assert_called()
150 |
151 | @mark.parametrize('toxi_code', ctl.toxic_map.keys())
152 | def test_slide_exception_side_effect(toxi_code, client):
153 | """
154 | GIVEN an HTTP client
155 | WHEN that client hits the /slider endpoint and adding a toxic raises an exception
156 | THEN the partially-applied toxic is destroyed
157 | """
158 | t_ = mock.Mock(spec=toxiproxy.Toxiproxy)
159 | proxy_mock = mock.Mock(spec=toxiproxy.proxy.Proxy)
160 | proxy_mock.toxics = mock.Mock(return_value=['fake_proxy_1', 'fake_proxy_2'])
161 | proxy_mock.add_toxic = mock.Mock(side_effect=Exception, name='sider')
162 | t_.attach_mock(mock.Mock(name='get_proxy_mock', return_value=proxy_mock), 'get_proxy')
163 | with mock.patch('dyno.app.api.control._fetch_proxy', return_value=t_):
164 | with mock.patch('toxiproxy.proxy.Proxy', proxy_mock):
165 | client.post(url_for('api.slide'), json={'tox_code': toxi_code, 'val': 100})
166 | proxy_mock.add_toxic.assert_called()
167 | proxy_mock.destroy_toxic.assert_called()
168 |
169 | @mark.parametrize('val', range(1, 101, 10))
170 | @mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1, 10]}))
171 | def test_normalize(val):
172 | """
173 | GIVEN values between 1-100
174 | WHEN the value is sent to be normalized
175 | THEN the correct normalized value is returned
176 | """
177 | got = ctl._normalize_value('Fr', val)
178 | want = (101 - val) / 10
179 | assert got == want
180 |
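# Note the inverted orientation these assertions encode for control.py's
# slider helpers: with a 'Fr' range of [1, 10], _normalize_value('Fr', 1) is
# expected to return 10.0 and _normalize_value('Fr', 91) to return 1.0, so
# sliding the control up drives the toxic value down. The values follow
# directly from want = (101 - val) / 10 above; the denormalize test below
# mirrors it with want = 100 - (val * 10).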
181 | @mark.parametrize('val', range(1, 10))
182 | @mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1, 10]}))
183 | def test_denormalize(val):
184 | """
185 | GIVEN values between 1-9
186 | WHEN the value is sent to be denormalized
187 | THEN the correct denormalized value is returned
188 | """
189 | got = ctl._denormalize_value('Fr', val)
190 | want = 100 - (val * 10)
191 | assert got == want
192 |
--------------------------------------------------------------------------------