├── .github
├── PULL_REQUEST_TEMPLATE.md
├── filters.yaml
└── workflows
│ ├── integration.yml
│ ├── run.yml
│ └── update-containers.yml
├── .gitignore
├── Makefile
├── README.md
├── browser
├── README.MD
├── document-load.html
├── document-load.js
├── getting_started.md
├── images
│ ├── document-load-1-1.png
│ ├── document-load-1-2.png
│ ├── trace1-1.png
│ ├── trace1-2.png
│ ├── user-interaction-1-1.png
│ └── user-interaction-1-2.png
├── index.html
├── package.json
├── tracer.all.js
├── tracer.js
├── user-interaction.html
├── user-interaction.js
└── webpack.config.js
├── collector
├── activedirectoryds
│ ├── collector.yaml
│ ├── docker-compose.yaml
│ └── docker-composeV2.yaml
├── activemq
│ ├── Dockerfile
│ ├── cmd
│ │ ├── producer
│ │ │ └── main.go
│ │ └── subscriber
│ │ │ └── main.go
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── go.mod
├── airflow
│ ├── README.md
│ ├── collector.yml
│ └── docker-compose.yml
├── apache
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── httpd
│ │ ├── Dockerfile
│ │ ├── httpd-info.conf
│ │ └── httpd.conf
├── aws-eks
│ ├── lightstep-values.yaml
│ └── readme.md
├── cassandra
│ ├── Dockerfile
│ ├── collector.yml
│ ├── configuration
│ │ └── cassandra-env.sh
│ ├── docker-compose.yml
│ └── schema
│ │ └── cassandra
│ │ ├── 0_keyspace.cql
│ │ ├── 1_otelu_names.cql
│ │ ├── 2_otelu_races.cql
│ │ ├── 3_metric_times.cql
│ │ └── 4_rank_by_year_and_name.cql
├── cilium
│ ├── .gitignore
│ ├── Makefile
│ ├── README.md
│ ├── collector
│ │ ├── kustomization.yaml
│ │ ├── secret.yaml
│ │ └── values.yaml
│ └── kind-config.yaml
├── collectd
│ ├── 50docker-apt-conf
│ ├── Dockerfile
│ ├── collectd.conf
│ ├── config-prometheus.yaml
│ ├── docker-compose.yaml
│ ├── rootfs_prefix
│ │ ├── Makefile
│ │ ├── error.h
│ │ └── rootfs_prefix.c
│ └── run-collectd.sh
├── confluent-cloud
│ ├── README.md
│ ├── collector.yml
│ └── docker-compose.yml
├── consul
│ ├── README.md
│ ├── client1.json
│ ├── client2.json
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── intention-config-entry.json
│ ├── server.json
│ └── service-install.sh
├── coredns
│ ├── README.md
│ ├── collector.yml
│ ├── corefile.conf
│ └── docker-compose.yml
├── couchbase
│ ├── Makefile
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.override.yml
│ └── docker-compose.yml
├── couchdb
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.cluster.yml
│ └── docker-compose.yml
├── docker
│ ├── README.md
│ ├── dockerstats
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── collector.yml
│ │ ├── docker-compose.yml
│ │ └── metrics.csv
│ └── prometheus
│ │ ├── collector.yml
│ │ └── docker-compose.yml
├── elasticsearch
│ ├── .env
│ ├── .gitignore
│ ├── LICENSE
│ ├── Makefile
│ ├── README.md
│ ├── collector.yml
│ ├── configs
│ │ ├── elasticsearch.yml
│ │ └── log4j2.properties
│ ├── docker-compose.override.yml
│ ├── docker-compose.setup.yml
│ ├── docker-compose.yml
│ └── setup
│ │ ├── instances.yml
│ │ ├── keystore.sh
│ │ ├── setup-certs.sh
│ │ └── setup-keystore.sh
├── envoy
│ ├── Dockerfile-frontenvoy
│ ├── README.md
│ ├── app
│ │ ├── flask
│ │ │ ├── Dockerfile
│ │ │ ├── requirements.in
│ │ │ └── requirements.txt
│ │ └── tracing
│ │ │ ├── Dockerfile
│ │ │ ├── requirements.in
│ │ │ ├── requirements.txt
│ │ │ ├── service.py
│ │ │ └── start_service.sh
│ ├── collector.yml
│ ├── docker-compose.override.yaml
│ ├── docker-compose.yaml
│ ├── front-envoy.yaml
│ └── service-envoy.yaml
├── etcd
│ ├── Dockerfile
│ ├── README.md
│ ├── config-prometheus.cluster.yaml
│ ├── config-prometheus.single.yaml
│ ├── docker-compose.cluster.yaml
│ ├── docker-compose.single.yaml
│ ├── etcd.conf
│ └── start-etcd.sh
├── external-dns
│ ├── Makefile
│ ├── README.md
│ ├── etcd-cluster.yaml
│ ├── external-dns.yaml
│ ├── ingress.yaml
│ ├── install_collector.sh
│ ├── values-collector.yaml
│ └── values-coredns.yaml
├── flink
│ ├── .gitignore
│ ├── Dockerfile
│ ├── LICENSE
│ ├── README.md
│ ├── build.gradle.kts
│ ├── collector.yaml
│ ├── docker-compose.yml
│ ├── gradle
│ │ └── wrapper
│ │ │ ├── gradle-wrapper.jar
│ │ │ └── gradle-wrapper.properties
│ ├── gradlew
│ ├── gradlew.bat
│ └── src
│ │ ├── integrationTest
│ │ └── java
│ │ │ └── com
│ │ │ └── github
│ │ │ └── mbode
│ │ │ └── flink_prometheus_example
│ │ │ ├── FlinkIT.java
│ │ │ ├── GrafanaIT.java
│ │ │ └── PrometheusIT.java
│ │ ├── main
│ │ └── java
│ │ │ └── com
│ │ │ └── github
│ │ │ └── mbode
│ │ │ └── flink_prometheus_example
│ │ │ ├── FlinkMetricsExposingMapFunction.java
│ │ │ ├── PrometheusExampleJob.java
│ │ │ └── RandomSourceFunction.java
│ │ └── test
│ │ └── java
│ │ └── com
│ │ └── github
│ │ └── mbode
│ │ └── flink_prometheus_example
│ │ ├── CollectSink.java
│ │ ├── FlinkMetricsExposingMapFunctionTest.java
│ │ ├── PrometheusExampleJobTest.java
│ │ └── RandomSourceFunctionTest.java
├── fluentd
│ ├── README.md
│ ├── collector-configmap.yaml
│ ├── collector.yaml
│ ├── fluentd-configmap.yaml
│ ├── fluentd-rbac.yaml
│ └── fluentd.yaml
├── gitea
│ ├── README.md
│ ├── app.ini
│ ├── collector.yaml
│ └── docker-compose.yaml
├── grafana
│ ├── README.md
│ ├── collector.yaml
│ ├── docker-compose.yaml
│ ├── grafana.ini
│ ├── prometheus
│ │ └── prometheus.yml
│ └── provisioning
│ │ ├── dashboards
│ │ ├── default.yaml
│ │ ├── demo-dashboard.json
│ │ └── grafana-internals.json
│ │ └── datasources
│ │ └── default.yaml
├── gunicorn
│ ├── README.md
│ ├── collector.yaml
│ ├── docker-compose.yaml
│ └── web
│ │ ├── Dockerfile
│ │ ├── main.py
│ │ └── requirements.txt
├── hadoop
│ ├── .gitignore
│ ├── Dockerfile
│ ├── Makefile
│ ├── README.md
│ ├── collector.yml
│ ├── conf
│ │ ├── hadoop-env.sh
│ │ └── yarn-env.sh
│ ├── docker-compose.yml
│ ├── hadoop.env
│ └── submit
│ │ ├── Dockerfile
│ │ └── run.sh
├── haproxy
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── haproxy.cfg
│ └── k8s
│ │ ├── Chart.yaml
│ │ ├── README.md
│ │ └── templates
│ │ ├── echoserver-deployment.yaml
│ │ ├── haproxy-claim0-persistentvolumeclaim.yaml
│ │ ├── haproxy-deployment.yaml
│ │ ├── haproxy-integrations-networkpolicy.yaml
│ │ ├── haproxy-service.yaml
│ │ ├── loadgen-deployment.yaml
│ │ ├── otel-collector-claim0-persistentvolumeclaim.yaml
│ │ ├── otel-collector-deployment.yaml
│ │ └── otel-collector-service.yaml
├── hbase
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── collector.yml
│ ├── conf
│ │ └── hbase-env.sh
│ ├── docker-compose.yml
│ ├── hadoop.env
│ └── hbase.env
├── hostmetrics
│ ├── collector.yml
│ └── docker-compose.yml
├── httpcheck
│ ├── README.md
│ ├── collector.yml
│ └── docker-compose.yml
├── ibmmq
│ ├── collector.yaml
│ ├── docker-compose.yaml
│ └── mqs.ini
├── iis
│ ├── Dockerfile_collector
│ ├── Dockerfile_iis
│ ├── README.md
│ ├── collector.yml
│ └── docker-compose.yml
├── istio
│ ├── Dockerfile
│ ├── README.md
│ ├── go-istio-demo.yaml
│ ├── go.mod
│ ├── istio-operator.yaml
│ ├── kind-config.yaml
│ ├── lightstep-secret.yaml
│ ├── main.go
│ ├── otel-collector-config.yaml
│ ├── otel-collector-configmap.yaml
│ ├── otel-collector-deployment.yaml
│ ├── otel-collector-rbac.yaml
│ └── otel-collector-service.yaml
├── k8s-tracing
│ ├── apiserver-tracing.yaml
│ ├── containerd.toml
│ ├── crio.conf
│ ├── docker-compose.yml
│ ├── kubelet-tracing.yaml
│ ├── otel-collector-config.yaml
│ ├── readme.md
│ └── run-minikube.sh
├── kafka
│ ├── collector.yml
│ ├── docker-compose.yaml
│ └── src
│ │ ├── consumer
│ │ ├── Dockerfile
│ │ ├── consumer.go
│ │ └── main.go
│ │ └── producer
│ │ ├── Dockerfile
│ │ ├── main.go
│ │ └── procucer.go
├── kong
│ ├── Makefile
│ ├── README.md
│ ├── install_collector.sh
│ ├── kind-config.yaml
│ └── values-collector.yaml
├── kubernetes
│ ├── Makefile
│ ├── README.md
│ ├── collector-configmap.yaml
│ ├── collector-rbac.yaml
│ ├── collector.yaml
│ └── metrics.csv
├── memcached
│ ├── README.md
│ ├── collector.yml
│ └── docker-compose.yml
├── minio
│ ├── README.md
│ ├── collector.yaml
│ └── docker-compose.yml
├── mongodb
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── mongo-entrypoint
│ │ ├── init-users.sh
│ │ └── seed-users.js
│ └── mongodb-batch
│ │ ├── go.mod
│ │ ├── go.sum
│ │ ├── main.go
│ │ └── pkg
│ │ └── generator
│ │ └── generator.go
├── multi-project-collector
│ ├── README.md
│ ├── config
│ │ └── collector.yml
│ └── docker-compose.yml
├── mysql
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── scripts
│ │ └── setup.sh
├── nginx-ingresscontroller
│ ├── .gitignore
│ ├── Makefile
│ ├── README.md
│ ├── collector
│ │ ├── kustomization.yaml
│ │ ├── secret.yaml
│ │ └── values.yaml
│ └── ingress
│ │ ├── default-server-secret.yaml
│ │ └── values.yaml
├── nginx
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── nginx-appsrv.conf
│ ├── nginx-proxy.conf
│ └── nginx_test.go
├── pgbouncer
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── sql
│ │ ├── example_create_tables.sql
│ │ └── fake_data_fill_tables.sql
├── php-fpm
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── www.conf
├── postgres
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── sql
│ │ ├── example_create_tables.sql
│ │ └── fake_data_fill_tables.sql
├── powerdns
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── recursor.conf
├── prom-native-arangodb
│ ├── README.md
│ ├── arangodb.conf
│ ├── collector.yml
│ └── docker-compose.yml
├── prom-native-ceph
│ ├── README.md
│ ├── collector.yml
│ └── docker-compose.yml
├── prom-native-clickhouse
│ ├── README.md
│ ├── collector.yml
│ ├── config
│ │ └── prom_conf.xml
│ └── docker-compose.yml
├── prom-native-cockroachdb
│ ├── README.md
│ ├── collector.yml
│ ├── config
│ │ └── roach-init.sh
│ └── docker-compose.yml
├── prom-native-micrometer
│ ├── .gitignore
│ ├── README.md
│ ├── backend
│ │ ├── Dockerfile
│ │ ├── pom.xml
│ │ └── src
│ │ │ └── main
│ │ │ ├── java
│ │ │ └── otel
│ │ │ │ └── example
│ │ │ │ └── micrometer
│ │ │ │ ├── Application.java
│ │ │ │ ├── controllers
│ │ │ │ └── HomeController.java
│ │ │ │ ├── entity
│ │ │ │ └── Greeting.java
│ │ │ │ └── repository
│ │ │ │ └── GreetingRepository.java
│ │ │ └── resources
│ │ │ ├── application.properties
│ │ │ ├── data.sql
│ │ │ ├── schema.sql
│ │ │ └── templates
│ │ │ └── home.ftlh
│ ├── collector.yml
│ ├── db
│ │ └── password.txt
│ └── docker-compose.yaml
├── prom-native-nomad
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── nomad
│ │ └── config
│ │ └── local.json
├── prom-native-scylla
│ ├── README.md
│ ├── collector.yaml
│ └── docker-compose.yaml
├── prom-native-singlestore
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── init.sql
├── prom-native-vault
│ ├── README.md
│ ├── collector.yml
│ ├── consul
│ │ └── config
│ │ │ └── consul-config.json
│ ├── docker-compose.yml
│ └── vault
│ │ └── config
│ │ ├── policies
│ │ └── prometheus-metrics.hcl
│ │ └── server.hcl
├── rabbitmq
│ ├── cmd
│ │ └── rabbitmq-simulator
│ │ │ ├── go.mod
│ │ │ ├── go.sum
│ │ │ └── rabbitmq-client.go
│ ├── config.yaml
│ ├── docker-compose.yaml
│ ├── rabbitmq-leader.conf
│ ├── rabbitmq-node-2.conf
│ └── rabbitmq-node-3.conf
├── redis
│ ├── Dockerfile
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── redis-loadgen-client.go
├── snmp
│ ├── Dockerfile_snmp
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── metrics.csv
│ └── snmpd.conf
├── solr
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── metrics.csv
│ └── nodeapp
│ │ ├── .gitignore
│ │ ├── index.js
│ │ ├── package.json
│ │ └── people.json
├── squid
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── squid.conf
├── statsd
│ ├── README.md
│ ├── collector.yaml
│ ├── docker-compose.yaml
│ └── generator.sh
├── tomcat
│ ├── Dockerfile
│ ├── collector.yml
│ └── docker-compose.yml
├── vanilla
│ ├── collector.yaml
│ └── readme.md
├── varnish
│ ├── Dockerfile
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ ├── docker-varnish-entrypoint
│ ├── nginx-appsrv.conf
│ └── varnish.vcl
└── zookeeper
│ ├── Makefile
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.override.yml
│ ├── docker-compose.yml
│ └── zoo.cfg
├── config
├── example-aws-collector-config.yaml
├── example-collector-config.yaml
├── example.env
└── integration.yml
├── demo-client
└── otlp
│ ├── Dockerfile
│ └── client.py
├── docker-compose.yml
├── go
├── opentelemetry
│ ├── collector
│ │ ├── client
│ │ │ ├── Dockerfile
│ │ │ ├── client.go
│ │ │ ├── go.mod
│ │ │ └── go.sum
│ │ └── server
│ │ │ ├── Dockerfile
│ │ │ ├── go.mod
│ │ │ ├── go.sum
│ │ │ └── server.go
│ └── otlp
│ │ ├── client
│ │ ├── Dockerfile
│ │ ├── client-http.go
│ │ ├── client.go
│ │ ├── go.mod
│ │ └── go.sum
│ │ ├── env_vars.sh
│ │ └── server
│ │ ├── Dockerfile
│ │ ├── go.mod
│ │ ├── go.sum
│ │ ├── server-http.go
│ │ └── server.go
└── opentracing
│ ├── client
│ ├── Dockerfile
│ ├── client.go
│ ├── go.mod
│ └── go.sum
│ └── server
│ ├── Dockerfile
│ ├── go.mod
│ ├── go.sum
│ └── server.go
├── java
├── Makefile
├── client
│ ├── Dockerfile
│ ├── Makefile
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── com
│ │ └── lightstep
│ │ └── examples
│ │ └── client
│ │ └── App.java
├── microdonuts
│ ├── Dockerfile
│ ├── README.md
│ ├── client
│ │ ├── client.js
│ │ ├── css
│ │ │ └── styles.css
│ │ ├── img
│ │ │ ├── donut-choc.jpg
│ │ │ ├── donut-cinn.jpg
│ │ │ ├── donut-glazed.png
│ │ │ ├── donut-jelly.jpg
│ │ │ ├── donut-old-fash.jpg
│ │ │ ├── donut-sprinkles.png
│ │ │ └── donuts.png
│ │ ├── index.html
│ │ ├── js
│ │ │ ├── jquery-3.2.0.min.js
│ │ │ ├── lightstep-tracer.js
│ │ │ └── opentracing-browser.js
│ │ └── order.js
│ ├── pom.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── com
│ │ └── otsample
│ │ └── api
│ │ ├── ApiContextHandler.java
│ │ ├── App.java
│ │ ├── KitchenConsumer.java
│ │ ├── KitchenContextHandler.java
│ │ ├── KitchenService.java
│ │ ├── Utils.java
│ │ └── resources
│ │ ├── Donut.java
│ │ ├── DonutAddRequest.java
│ │ ├── DonutRequest.java
│ │ ├── Status.java
│ │ ├── StatusReq.java
│ │ └── StatusRes.java
├── otlp
│ ├── Dockerfile.client
│ ├── Dockerfile.server
│ ├── Makefile
│ ├── pom-client.xml
│ ├── pom-server.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── com
│ │ └── lightstep
│ │ └── otlp
│ │ ├── client
│ │ └── Client.java
│ │ └── server
│ │ ├── ApiContextHandler.java
│ │ └── ExampleServer.java
├── ottrace
│ ├── Dockerfile.client
│ ├── Dockerfile.server
│ ├── Makefile
│ ├── pom-client.xml
│ ├── pom-server.xml
│ └── src
│ │ └── main
│ │ └── java
│ │ └── com
│ │ └── lightstep
│ │ └── ottrace
│ │ ├── client
│ │ └── Client.java
│ │ └── server
│ │ ├── ApiContextHandler.java
│ │ └── ExampleServer.java
└── server
│ ├── Dockerfile
│ ├── Makefile
│ ├── pom.xml
│ └── src
│ └── main
│ └── java
│ └── com
│ └── lightstep
│ └── examples
│ └── server
│ ├── ApiContextHandler.java
│ └── App.java
├── nodejs
├── README.md
├── client.js
├── first-trace.js
├── otel-vanilla
│ ├── metrics
│ │ ├── README.md
│ │ ├── app.js
│ │ └── package.json
│ └── tracing
│ │ ├── app.js
│ │ ├── index.js
│ │ └── package.json
├── package.json
└── server.js
├── operator
└── java-autoinst
│ ├── readme.md
│ └── values.yaml
├── python
├── opentelemetry
│ ├── auto_instrumentation
│ │ ├── Dockerfile.client
│ │ ├── Dockerfile.server
│ │ ├── README.md
│ │ ├── client.py
│ │ ├── requirements.txt
│ │ └── server.py
│ └── manual_instrumentation
│ │ ├── Dockerfile.client
│ │ ├── Dockerfile.server
│ │ ├── README.md
│ │ ├── client.py
│ │ ├── common.py
│ │ ├── requirements.txt
│ │ └── server.py
└── opentracing
│ ├── Dockerfile.client
│ ├── Dockerfile.server
│ ├── client.py
│ ├── requirements.txt
│ └── server.py
├── telegraf
├── http
│ ├── README.md
│ ├── app
│ │ └── main.go
│ ├── docker-compose.yml
│ └── telegraf
│ │ └── telegraf.conf
├── influxdb-migrate
│ ├── README.md
│ ├── collector.yml
│ ├── docker-compose.yml
│ └── telegraf.conf
├── monit
│ ├── README.md
│ └── telegraf
│ │ ├── out-stream.json
│ │ └── telegraf.conf
├── mqtt
│ ├── README.md
│ ├── docker-compose.yml
│ ├── mosquitto
│ │ └── config
│ │ │ └── mosquitto.conf
│ └── telegraf
│ │ └── telegraf.conf
└── net_response
│ ├── README.md
│ ├── app
│ └── main.go
│ ├── docker-compose.yml
│ └── telegraf
│ └── telegraf.conf
└── tools
├── integration
├── Dockerfile
├── generate_config.py
├── requirements.txt
└── test.py
├── precommit.sh
├── templates
├── config-prometheus.yaml
├── config.yaml
└── docker-compose.yaml
└── update-token.sh
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 | What does this PR do?
3 |
4 | ## PR checklist for examples
5 |
6 | Check these items before merging the PR.
7 |
8 | - [ ] Each example contains a README file.
9 | - [ ] README documents all steps and variables required to run the example. The simplest way to confirm this is to follow the instructions on a clean machine.
10 | - [ ] README file contains the links to the official documentation with relevant configuration and to the metrics published if that is available.
11 | - [ ] Used images of apps and collectors have to be pinned; "latest" should never be used.
12 | - [ ] Evidence with screenshots provided by the reviewer.
13 | - [ ] Includes a file called `metrics.csv` with metrics produced by the example.
14 | The file should have these 5 headings: Name, Description, Unit, DataType, Attributes.
15 | Description is not provided for all metrics, so it may be blank. Attributes are also not always provided. When there are multiple attributes on a metric record they should be space separated.
16 |
--------------------------------------------------------------------------------
/.github/filters.yaml:
--------------------------------------------------------------------------------
1 | code-examples:
2 | - 'go/**'
3 | - 'java/**'
4 | - 'nodejs/**'
5 | - 'python/**'
6 | collector:
7 | - 'collector/**'
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | build/
3 | .npm
4 | .eslintcache
5 | package-lock.json
6 | .DS_Store
7 | .vscode/
8 | .idea/
9 | *.iml
10 | .env*
11 | target
12 | dependency-reduced-pom.xml
13 | *.jar
14 | .swp
15 | config/collector-config.yaml
16 | *scratchpad*
17 | venv*/
18 | *.pyc
19 | __pycache__
20 | *.bak*
21 |
--------------------------------------------------------------------------------
/browser/README.MD:
--------------------------------------------------------------------------------
1 | ## Getting Started with OpenTelemetry
2 | This repo contains the source code that accompanies the [browser getting started guide](https://docs.lightstep.com/otel/js-getting-started-with-opentelemetry) on the [Cloud Observability Learning Portal](https://docs.lightstep.com/otel/what-is-opentelemetry).
3 | Visit [the portal](https://docs.lightstep.com/otel/what-is-opentelemetry) for this and other great resources on OpenTelemetry.
4 |
5 |
--------------------------------------------------------------------------------
/browser/document-load.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Cloud Observability Web Example - document load auto-instrumentation
6 |
7 |
8 |
9 |
10 |
11 | Document Load
12 | See output in the web developer console
13 |
14 |
15 |
--------------------------------------------------------------------------------
/browser/document-load.js:
--------------------------------------------------------------------------------
'use strict';

import { WebTracerProvider } from '@opentelemetry/web';
import { ConsoleSpanExporter, SimpleSpanProcessor } from '@opentelemetry/tracing';
import { CollectorTraceExporter } from '@opentelemetry/exporter-collector';
import { DocumentLoad } from '@opentelemetry/plugin-document-load';
import { ZoneContextManager } from '@opentelemetry/context-zone';

// Tracer provider with the document-load plugin enabled, so page load
// timings are captured as spans automatically.
const provider = new WebTracerProvider({
  plugins: [new DocumentLoad()],
});

// Export each span as soon as it ends: once to the developer console and
// once to Cloud Observability over OTLP/HTTP.
const consoleProcessor = new SimpleSpanProcessor(new ConsoleSpanExporter());
const collectorProcessor = new SimpleSpanProcessor(
  new CollectorTraceExporter({
    url: 'https://ingest.lightstep.com:443/api/v2/otel/trace',
    headers: {
      'Lightstep-Access-Token': 'YOUR_TOKEN',
    },
  })
);
provider.addSpanProcessor(consoleProcessor);
provider.addSpanProcessor(collectorProcessor);

// Register the provider with Zone.js-based context propagation.
provider.register({
  contextManager: new ZoneContextManager(),
});
29 |
--------------------------------------------------------------------------------
/browser/images/document-load-1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/browser/images/document-load-1-1.png
--------------------------------------------------------------------------------
/browser/images/document-load-1-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/browser/images/document-load-1-2.png
--------------------------------------------------------------------------------
/browser/images/trace1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/browser/images/trace1-1.png
--------------------------------------------------------------------------------
/browser/images/trace1-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/browser/images/trace1-2.png
--------------------------------------------------------------------------------
/browser/images/user-interaction-1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/browser/images/user-interaction-1-1.png
--------------------------------------------------------------------------------
/browser/images/user-interaction-1-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/browser/images/user-interaction-1-2.png
--------------------------------------------------------------------------------
/browser/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Cloud Observability Web Example
6 |
7 |
8 |
9 |
10 |
11 |
12 | Tracer example
13 | Open the web developer console
14 |
15 |
16 |
--------------------------------------------------------------------------------
/browser/user-interaction.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Cloud Observability Web Example - user interaction
6 |
7 |
8 |
9 |
10 |
11 | User Interaction
12 | Open the web developer console and click the button below
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/browser/webpack.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | mode: 'development',
3 | entry: {
4 | 'document-load': 'document-load.js',
5 | tracer: 'tracer.js',
6 | 'user-interaction': 'user-interaction.js',
7 | },
8 | output: {
9 | filename: '[name].js',
10 | sourceMapFilename: '[file].map',
11 | },
12 | target: 'web',
13 | module: {
14 | rules: [
15 | {
16 | test: /\.js$/,
17 | exclude: /(node_modules)/,
18 | use: { loader: 'babel-loader' },
19 | },
20 | ],
21 | },
22 | resolve: { modules: [__dirname, 'node_modules'], extensions: ['.js'] },
23 | devtool: 'eval-source-map',
24 | };
25 |
--------------------------------------------------------------------------------
/collector/activedirectoryds/collector.yaml:
--------------------------------------------------------------------------------
receivers:
  # Scrape Active Directory Domain Services performance counters every 10s.
  active_directory_ds:
    collection_interval: 10s
    metrics:
      # Disable the active_directory.ds.replication.network.io metric from being emitted
      active_directory.ds.replication.network.io: false

exporters:
  # Log all telemetry to stdout for debugging.
  logging:
    loglevel: debug

  # Forward metrics to Cloud Observability; token injected via environment.
  otlp:
    endpoint: ingest.lightstep.com:443
    headers:
      "lightstep-access-token": "${LS_ACCESS_TOKEN}"

processors:
  batch:

service:
  pipelines:
    metrics:
      receivers: [active_directory_ds]
      processors: [batch]
      exporters: [logging, otlp]
--------------------------------------------------------------------------------
/collector/activemq/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: download the JMX metrics-gatherer jar and the collector binary.
# Note: keyword casing is consistent (FROM ... AS) to avoid the
# FromAsCasing build warning.
FROM curlimages/curl:7.82.0 AS curler
ARG JMX_JAR_VERSION=v1.14.0
USER root

RUN curl -L \
  --output /opentelemetry-jmx-metrics.jar \
  "https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/${JMX_JAR_VERSION}/opentelemetry-jmx-metrics.jar"

RUN curl -L -s \
  "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.53.0/otelcol-contrib_0.53.0_linux_amd64.tar.gz" | \
  tar -xvz -C /

# Stage 2: run the collector on a JRE image (the JMX receiver launches Java).
FROM ibmjava:8-jre
WORKDIR /

COPY --from=curler /opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar
COPY --from=curler /otelcol-contrib /otelcol-contrib

ENTRYPOINT [ "/otelcol-contrib" ]
CMD ["--config", "/etc/otel/config.yaml"]
--------------------------------------------------------------------------------
/collector/activemq/cmd/producer/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/go-stomp/stomp"
6 | )
7 |
8 | func Producer(c, quit chan string, conn *stomp.Conn) {
9 | for {
10 | select {
11 | case c <- "msg sent":
12 | err := conn.Send(
13 | "/queue/myqueue1", //destination
14 | "text/plain", //content-type
15 | []byte("Test message #1")) //body
16 | if err != nil {
17 | fmt.Println(err)
18 | return
19 | }
20 | case <-quit:
21 | fmt.Println("finish")
22 | return
23 | }
24 | }
25 | }
26 |
27 | //Connect to ActiveMQ and produce messages
28 | func main() {
29 | conn, err := stomp.Dial("tcp", "localhost:61613")
30 | if err != nil {
31 | fmt.Println(err)
32 | }
33 |
34 | c := make(chan string)
35 | quit := make(chan string)
36 | go Producer(c, quit, conn)
37 |
38 | for {
39 | fmt.Println(<-c)
40 | }
41 | quit <- "read"
42 | }
43 |
--------------------------------------------------------------------------------
/collector/activemq/cmd/subscriber/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "github.com/go-stomp/stomp"
6 | )
7 |
8 | //Connect to ActiveMQ and listen for messages
9 | func main() {
10 | conn, err := stomp.Dial("tcp", "localhost:61613")
11 | if err != nil {
12 | fmt.Println(err)
13 | }
14 |
15 | sub, err := conn.Subscribe("/queue/myqueue1", stomp.AckAuto)
16 | if err != nil {
17 | fmt.Println(err)
18 | }
19 | for {
20 | msg := <-sub.C
21 | fmt.Println(msg)
22 | }
23 |
24 | err = sub.Unsubscribe()
25 | if err != nil {
26 | fmt.Println(err)
27 | }
28 | defer conn.Disconnect()
29 | }
30 |
--------------------------------------------------------------------------------
/collector/activemq/collector.yml:
--------------------------------------------------------------------------------
receivers:
  # JMX receiver: launches the metrics-gatherer jar against ActiveMQ's
  # remote JMX endpoint and collects both JVM and ActiveMQ metric sets.
  jmx/activemq:
    jar_path: /opt/opentelemetry-jmx-metrics.jar
    endpoint: activemq:10991
    target_system: jvm,activemq

exporters:
  # Log all telemetry to stdout for debugging.
  logging:
    loglevel: debug
  # Forward metrics to Cloud Observability; token injected via environment.
  otlp:
    endpoint: ingest.lightstep.com:443
    headers:
      "lightstep-access-token": "${LS_ACCESS_TOKEN}"

processors:
  batch:

service:
  pipelines:
    metrics:
      receivers: [jmx/activemq]
      processors: [batch]
      exporters: [logging, otlp]
24 |
--------------------------------------------------------------------------------
/collector/activemq/go.mod:
--------------------------------------------------------------------------------
1 | module activemq
2 |
3 | go 1.18
4 |
5 | require github.com/go-stomp/stomp v2.1.4+incompatible // indirect
6 |
--------------------------------------------------------------------------------
/collector/airflow/collector.yml:
--------------------------------------------------------------------------------
receivers:
  # Airflow emits StatsD metrics; listen on the address Airflow sends to and
  # aggregate for 60s before handing metrics to the pipeline.
  statsd:
    endpoint: "otel-collector:8125"
    aggregation_interval: 60s
    is_monotonic_counter: true
    # Map StatsD histogram/timing types to OTLP summaries.
    timer_histogram_mapping:
      - statsd_type: "histogram"
        observer_type: "summary"
      - statsd_type: "timing"
        observer_type: "summary"

exporters:
  # Log all telemetry to stdout for debugging.
  logging:
    loglevel: debug
  # Forward metrics to Cloud Observability; token injected via environment.
  otlp/public:
    endpoint: ingest.lightstep.com:443
    headers:
      "lightstep-access-token": "${LS_ACCESS_TOKEN}"

processors:
  batch:

service:
  pipelines:
    metrics:
      receivers: [statsd]
      processors: [batch]
      exporters: [logging, otlp/public]
29 |
--------------------------------------------------------------------------------
/collector/apache/collector.yml:
--------------------------------------------------------------------------------
receivers:
  # Scrape Apache's mod_status page (the machine-readable "?auto" format).
  apache:
    endpoint: "http://apache:80/server-status?auto"

exporters:
  # Log all telemetry to stdout for debugging.
  logging:
    loglevel: debug
  # configuring otlp to Cloud Observability
  otlp:
    endpoint: ingest.lightstep.com:443
    headers:
      "lightstep-access-token": "${LS_ACCESS_TOKEN}"

processors:
  batch:

service:
  pipelines:
    metrics:
      receivers: [apache]
      processors: [batch]
      exporters: [logging, otlp]
23 |
--------------------------------------------------------------------------------
/collector/apache/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.9'
services:
  # Apache httpd built from the local ./httpd directory, with mod_status
  # configured so the collector can scrape metrics from it.
  apache:
    build: ./httpd
    container_name: apache
    ports:
      - '8080:80'
    networks:
      - integrations
    volumes:
      - ./httpd/website:/usr/local/apache2/htdocs
    stop_grace_period: 1s
  # OpenTelemetry collector; reads collector.yml and expects LS_ACCESS_TOKEN
  # in the host environment to export to Cloud Observability.
  otel-collector:
    container_name: otel-collector
    image: otel/opentelemetry-collector-contrib:0.50.0
    command: ["--config=/conf/collector.yml"]
    environment:
      LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
    networks:
      - integrations
    volumes:
      - ./collector.yml:/conf/collector.yml:rw
networks:
  integrations:
25 |
--------------------------------------------------------------------------------
/collector/apache/httpd/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM httpd:2.4
2 | COPY ./httpd.conf /usr/local/apache2/conf/httpd.conf
3 | COPY ./httpd-info.conf /usr/local/apache2/conf/extra/httpd-info.conf
4 |
--------------------------------------------------------------------------------
/collector/aws-eks/lightstep-values.yaml:
--------------------------------------------------------------------------------
# Helm values overriding the bundled opentelemetry-collector configuration.
opentelemetry-collector:
  config:
    processors:
      # Detect EKS/EC2 resource attributes and attach them to all telemetry.
      resourcedetection/eks:
        detectors: [env, ec2, eks]
        timeout: 2s
        override: false
    exporters:
      logging:
        verbosity: normal
        sampling_initial: 5
        sampling_thereafter: 200
      # Cloud Observability endpoint; fill in the access token before installing.
      otlp/ls:
        endpoint: ingest.lightstep.com:443
        headers:
          "lightstep-access-token": ""

    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: [resourcedetection/eks, batch]
          exporters: [logging, otlp/ls]
        metrics:
          receivers: [otlp]
          processors: [resourcedetection/eks, batch]
          exporters: [logging, otlp/ls]
28 |
--------------------------------------------------------------------------------
/collector/cassandra/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM curlimages/curl:7.82.0 as curler
2 | ARG JMX_JAR_VERSION=v1.14.0
3 | USER root
4 |
5 | RUN curl -L \
6 | --output /opentelemetry-jmx-metrics.jar \
7 | "https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/${JMX_JAR_VERSION}/opentelemetry-jmx-metrics.jar"
8 |
9 | RUN curl -L -s \
10 | "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.53.0/otelcol-contrib_0.53.0_linux_amd64.tar.gz" | \
11 | tar -xvz -C /
12 |
13 | FROM ibmjava:8-jre
14 | WORKDIR /
15 |
16 | COPY --from=curler /opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar
17 | COPY --from=curler /otelcol-contrib /otelcol-contrib
18 |
19 | ENTRYPOINT [ "/otelcol-contrib" ]
20 | CMD ["--config", "/etc/otel/config.yaml"]
--------------------------------------------------------------------------------
/collector/cassandra/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | jmx/cassandra:
3 | jar_path: /opt/opentelemetry-jmx-metrics.jar
4 | endpoint: cassandra:17199
5 | target_system: jvm,cassandra
6 | collection_interval: 5s
7 |
8 | exporters:
9 | logging:
10 | loglevel: debug
11 |
12 | otlp:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics:
23 | receivers: [jmx/cassandra]
24 | processors: [batch]
25 | exporters: [logging, otlp]
26 |
--------------------------------------------------------------------------------
/collector/cassandra/schema/cassandra/0_keyspace.cql:
--------------------------------------------------------------------------------
1 | CREATE KEYSPACE IF NOT EXISTS otel
2 | WITH REPLICATION = {
3 | 'class' : 'SimpleStrategy',
4 | 'replication_factor' : 1
5 | };
--------------------------------------------------------------------------------
/collector/cassandra/schema/cassandra/1_otelu_names.cql:
--------------------------------------------------------------------------------
1 | // -------------- otelu_name ---
2 | DROP TABLE IF EXISTS otel.otelu_name;
3 |
4 | CREATE TABLE otel.otelu_name (
5 | id UUID PRIMARY KEY,
6 | lastname text,
7 | firstname text
8 | );
9 |
10 | // Insert a record that only contains the min values UUID
11 | INSERT INTO otel.otelu_name (id) VALUES (uuid());
12 |
13 | // Remove the record from the table
14 | TRUNCATE otel.otelu_name;
15 |
16 | INSERT INTO otel.otelu_name (id, lastname, firstname) VALUES (e7cd5752-bc0d-4157-a80f-7523add8dbcd, 'VAN DER BREGGEN', 'Anna');
17 | INSERT INTO otel.otelu_name (id, lastname, firstname) VALUES (e7ae5cf3-d358-4d99-b900-85902fda9bb0, 'FRAME', 'Alex');
18 | INSERT INTO otel.otelu_name (id, lastname, firstname) VALUES (220844bf-4860-49d6-9a4b-6b5d3a79cbfb, 'TIRALONGO', 'Paolo');
19 | INSERT INTO otel.otelu_name (id, lastname, firstname) VALUES (6ab09bec-e68e-48d9-a5f8-97e6fb4c9b47, 'KRUIKSWIJK', 'Steven');
20 | INSERT INTO otel.otelu_name (id, lastname, firstname) VALUES (fb372533-eb95-4bb4-8685-6ef61e994caa, 'MATTHEWS', 'Michael');
--------------------------------------------------------------------------------
/collector/cilium/.gitignore:
--------------------------------------------------------------------------------
1 | .patch.token.yaml
2 |
--------------------------------------------------------------------------------
/collector/cilium/collector/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | - values.yaml
5 | - secret.yaml
6 | patchesStrategicMerge:
7 | - ./.patch.token.yaml
8 |
--------------------------------------------------------------------------------
/collector/cilium/collector/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: lightstep-secret
 5 | type: Opaque
6 | stringData:
7 | lightstep_access_token:
8 |
--------------------------------------------------------------------------------
/collector/cilium/kind-config.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | name: cilium-demo-cluster
4 | nodes:
5 | - role: control-plane
6 | - role: worker
7 | - role: worker
8 | - role: worker
9 | networking:
10 | disableDefaultCNI: true
11 | podSubnet: "10.10.0.0/16"
12 | serviceSubnet: "10.11.0.0/16"
13 |
--------------------------------------------------------------------------------
/collector/collectd/50docker-apt-conf:
--------------------------------------------------------------------------------
1 | APT::Install-Recommends "1";
2 | APT::Install-Suggests "1";
3 | APT::Get::Assume-Yes "1";
4 | APT::Get::AutomaticRemove "1";
5 |
--------------------------------------------------------------------------------
/collector/collectd/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stable-slim
2 |
3 | ENV DEBIAN_FRONTEND noninteractive
4 | COPY 50docker-apt-conf /etc/apt/apt.conf.d/
5 |
6 | COPY rootfs_prefix/ /usr/src/rootfs_prefix/
7 |
8 | RUN apt-get update \
9 | && apt-get upgrade \
10 | && apt-get install \
11 | collectd-core \
12 | collectd-utils \
13 | build-essential \
14 | zsh \
15 | && make -C /usr/src/rootfs_prefix/ \
16 | && apt-get --purge remove build-essential \
17 | && apt-get clean \
18 | && rm -rf /var/lib/apt/lists/*
19 |
20 | COPY collectd.conf /etc/collectd/collectd.conf
21 | COPY run-collectd.sh /usr/local/sbin/run-collectd.sh
22 |
23 | RUN chmod u+x /usr/local/sbin/run-collectd.sh
24 |
25 | ENV LD_PRELOAD /usr/src/rootfs_prefix/rootfs_prefix.so
26 |
27 | EXPOSE 9103
28 |
29 | ENTRYPOINT ["/usr/local/sbin/run-collectd.sh", "8"]
30 |
--------------------------------------------------------------------------------
/collector/collectd/collectd.conf:
--------------------------------------------------------------------------------
 1 | 
 2 | LoadPlugin write_prometheus
 3 | LoadPlugin postgresql
 4 | 
 5 | <Plugin write_prometheus>
 6 |   Port 9103
 7 | </Plugin>
 8 | 
 9 | <Plugin postgresql>
10 |   <Database otel>
11 |     Host "pgsql"
12 |     Port "5432"
13 |     User "otel"
14 |     Password "otel"
15 |     SSLMode "prefer"
16 |     KRBSrvName "kerberos_service_name"
17 |     Query magic
18 |   </Database>
19 | </Plugin>
20 | 
--------------------------------------------------------------------------------
/collector/collectd/config-prometheus.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/collectd:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | scrape_configs:
7 | - job_name: 'component-scraper'
8 | scrape_interval: 5s
9 | metrics_path: "/metrics"
10 | static_configs:
11 | - targets: ["collectd:9103"]
12 | exporters:
13 | otlp:
14 | endpoint: ingest.lightstep.com:443
15 | headers:
16 |       lightstep-access-token: ${LS_ACCESS_TOKEN}
17 | service:
18 | pipelines:
19 | metrics:
20 | receivers: [prometheus/collectd]
21 | exporters: [otlp]
22 |
--------------------------------------------------------------------------------
/collector/collectd/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 |
3 | services:
4 | collectd:
5 | container_name: collectd
6 | hostname: collectd
7 | build: .
8 | depends_on:
9 | - "pgsql"
10 | networks:
11 | - integration
12 | ports:
13 | - "9103:9103"
14 | otel-collector:
15 |     container_name: otel-collector
16 | hostname: otel-collector
17 | image: otel/opentelemetry-collector-contrib:0.50.0
18 | command: ["--config=/conf/config-prometheus.yaml"]
19 | environment:
20 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
21 | networks:
22 | - integration
23 | volumes:
24 | - ./config-prometheus.yaml:/conf/config-prometheus.yaml:rw
25 | pgsql:
26 | container_name: pgsql
27 | hostname: pgsql
28 | image: postgres
29 | environment:
30 | POSTGRES_USER: "otel"
31 | POSTGRES_PASSWORD: "otel"
32 | networks:
33 | - integration
34 | ports:
35 | - "5432:5432"
36 |
37 | networks:
38 | integration:
39 | driver: bridge
40 |
--------------------------------------------------------------------------------
/collector/collectd/rootfs_prefix/Makefile:
--------------------------------------------------------------------------------
1 | CC = gcc
2 | CFLAGS = -Wall -Werror -fPIC -shared
 3 | DIRECTIVE = /usr/src/rootfs_prefix
4 |
5 | # rootfs_prefix.so creates a shared object file for rootfs_prefix.c
6 | # which is preloaded with LD_PRELOAD
7 | rootfs_prefix.so: rootfs_prefix.c
8 | $(CC) -I$(DIRECTIVE) $(CFLAGS) -o rootfs_prefix.so rootfs_prefix.c -ldl
9 |
--------------------------------------------------------------------------------
/collector/collectd/run-collectd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 | sleep $1
3 | /usr/sbin/collectd -f
4 |
--------------------------------------------------------------------------------
/collector/confluent-cloud/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | container_name: otel-collector
6 | image: otel/opentelemetry-collector-contrib:0.67.0
7 | environment:
8 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
9 | CLUSTER_BOOTSTRAP_SERVER: ${CLUSTER_BOOTSTRAP_SERVER}
10 | CLUSTER_API_KEY: ${CLUSTER_API_KEY}
11 | CLUSTER_API_SECRET: ${CLUSTER_API_SECRET}
12 | CONFLUENT_API_ID: ${CONFLUENT_API_ID}
13 | CONFLUENT_API_SECRET: ${CONFLUENT_API_SECRET}
14 | CLUSTER_ID: ${CLUSTER_ID}
15 |
16 | configs:
17 | - source: collector_conf
18 | target: /conf/collector.yml
19 | command: ["--config=/conf/collector.yml"]
20 | networks:
21 | - integrations
22 |
23 | configs:
24 | collector_conf:
25 | file: ./collector.yml
26 |
27 | networks:
28 | integrations:
29 |
--------------------------------------------------------------------------------
/collector/consul/client1.json:
--------------------------------------------------------------------------------
1 | {
2 | "node_name": "consul-client1",
3 | "datacenter": "dc1",
4 | "data_dir": "/consul/data",
5 | "log_level":"INFO",
6 | "retry_join":[
7 | "consul-server"
8 | ],
9 | "service": {
10 | "name": "counting",
11 | "port": 9003,
12 | "connect": {
13 | "sidecar_service": {}
14 | },
15 | "check": {
16 | "id": "counting-check",
17 | "http": "http://localhost:9003/health",
18 | "method": "GET",
19 | "interval": "1s",
20 | "timeout": "1s"
21 | }
22 | },
23 | "ports": {
24 | "grpc": 8502
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/collector/consul/client2.json:
--------------------------------------------------------------------------------
1 | {
2 | "node_name": "consul-client2",
3 | "datacenter": "dc1",
4 | "data_dir": "/consul/data",
5 | "log_level":"INFO",
6 | "retry_join":[
7 | "consul-server"
8 | ],
9 | "service": {
10 | "name": "dashboard",
11 | "port": 9002,
12 | "connect": {
13 | "sidecar_service": {
14 | "proxy": {
15 | "upstreams": [
16 | {
17 | "destination_name": "counting",
18 | "local_bind_port": 5000
19 | }
20 | ]
21 | }
22 | }
23 | },
24 | "check": {
25 | "id": "dashboard-check",
26 | "http": "http://localhost:9002/health",
27 | "method": "GET",
28 | "interval": "1s",
29 | "timeout": "1s"
30 | }
31 | },
32 | "ports": {
33 | "grpc": 8502
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/collector/consul/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'consul-server'
6 | scrape_interval: 10s
7 | metrics_path: '/v1/agent/metrics'
8 | params:
9 | format: ['prometheus']
10 | static_configs:
11 | - targets: ['consul-server:8500']
12 |
13 | exporters:
14 | logging:
15 | loglevel: debug
16 | otlp:
17 | endpoint: ingest.lightstep.com:443
18 | headers:
19 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
20 |
21 | processors:
22 | batch:
23 |
24 | service:
25 | pipelines:
26 | metrics:
27 | receivers: [prometheus]
28 | processors: [batch]
29 | exporters: [logging, otlp]
30 |
--------------------------------------------------------------------------------
/collector/consul/intention-config-entry.json:
--------------------------------------------------------------------------------
1 | {
2 | "Kind": "service-intentions",
3 | "Name": "counting",
4 | "Sources": [
5 | {
6 | "Name": "dashboard",
7 | "Action": "allow"
8 | }
9 | ]
10 | }
11 |
--------------------------------------------------------------------------------
/collector/consul/server.json:
--------------------------------------------------------------------------------
1 | {
2 | "node_name": "consul-server",
3 | "server": true,
4 | "bootstrap" : true,
5 | "ui_config": {
6 | "enabled" : true
7 | },
8 | "datacenter": "dc1",
9 | "data_dir": "/consul/data",
10 | "telemetry": {
11 | "prometheus_retention_time": "60s",
12 | "disable_hostname": true
13 | },
14 | "log_level":"INFO",
15 | "addresses": {
16 | "http" : "0.0.0.0"
17 | },
18 | "connect": {
19 | "enabled": true
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/collector/coredns/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-coredns
6 | static_configs:
7 | - targets: [coredns:9153]
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics/coredns:
23 | receivers: [prometheus]
24 | processors: [batch]
25 | exporters: [logging, otlp/public]
26 |
--------------------------------------------------------------------------------
/collector/coredns/corefile.conf:
--------------------------------------------------------------------------------
1 | .:53 {
2 | prometheus :9153
3 | }
4 |
--------------------------------------------------------------------------------
/collector/coredns/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.60.0
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | configs:
9 | - source: collector_conf
10 | target: /conf/collector.yml
11 | command: ["--config=/conf/collector.yml"]
12 | networks:
13 | - integrations
14 |
15 | coredns:
16 | image: coredns/coredns:1.10.0
17 | configs:
18 | - source: corefile_conf
19 | target: /etc/corefile.conf
20 | command: ["-conf", "/etc/corefile.conf"]
21 | ports:
22 | - "9153:9153"
23 | networks:
24 | - integrations
25 |
26 | configs:
27 | collector_conf:
28 | file: ./collector.yml
29 | corefile_conf:
30 | file: ./corefile.conf
31 |
32 | networks:
33 | integrations:
34 |
--------------------------------------------------------------------------------
/collector/couchbase/Makefile:
--------------------------------------------------------------------------------
1 | .DEFAULT_GOAL:=help
2 |
3 | # --------------------------
4 | .PHONY: couchbase logs clean help
5 |
6 | couchbase: ## Start Couchbase.
7 | docker compose up -d --build
8 |
9 | logs: ## Tail all logs with -n 1000.
10 | @docker compose logs --follow --tail=1000
11 |
12 | clean: ## Remove Couchbase Containers
13 | @docker compose stop && docker compose rm
14 | @docker volume prune -f --filter label=com.docker.compose.project=couchbase
15 |
16 | help: ## Show this help.
17 | @echo "Make Application Docker Images and Containers using Docker-Compose files in 'docker' Dir."
18 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m (default: help)\n\nTargets:\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-12s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
19 |
--------------------------------------------------------------------------------
/collector/couchbase/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/couchbase:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-couchbase-eg
6 | scrape_interval: 5s
7 | static_configs:
8 | - targets: ["couchbase:8091"]
9 |
10 | exporters:
11 | logging:
12 | loglevel: debug
13 | otlp/public:
14 | endpoint: ingest.lightstep.com:443
15 | headers:
16 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
17 |
18 | processors:
19 | batch:
20 |
21 | service:
22 | pipelines:
23 | metrics:
24 | receivers: [prometheus/couchbase]
25 | processors: [batch]
26 | exporters: [otlp/public, logging]
27 |
--------------------------------------------------------------------------------
/collector/couchbase/docker-compose.override.yml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 | otel-collector:
4 | image: otel/opentelemetry-collector-contrib:${OTEL_COLLECTOR_VERSION}
5 | environment:
6 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
7 | configs:
8 | - source: collector_conf
9 | target: /conf/collector.yml
10 | depends_on:
11 | couchbase:
12 | condition: service_healthy
13 | volumes:
14 | - ./statsout:/statsout
15 | command: ["--config=/conf/collector.yml"]
16 | networks:
17 | - integrations
18 |
19 | configs:
20 | collector_conf:
21 | file: ./collector.yml
22 |
23 |
--------------------------------------------------------------------------------
/collector/couchbase/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | couchbase:
5 | image: couchbase
6 | deploy:
7 | replicas: 1
8 | ports:
9 | - 8091:8091
10 | - 8092:8092
11 | - 8093:8093
12 | - 8094:8094
13 | - 11210:11210
14 | healthcheck:
15 | test: curl --fail -u Administrator:password http://localhost:8091/pools || exit 1
16 | interval: 10s
17 | timeout: 20s
18 | retries: 10
19 | start_period: 30s
20 | networks:
21 | - integrations
22 |
23 | networks:
24 | integrations:
25 |
--------------------------------------------------------------------------------
/collector/couchdb/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | couchdb:
3 | endpoint: http://couchdb:5984
4 | username: otelu
5 | password: otelp
6 | collection_interval: 10s
7 |
8 | exporters:
9 | otlp:
10 | endpoint: ingest.lightstep.com:443
11 | headers:
12 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
13 |
14 | processors:
15 | batch:
16 |
17 | service:
18 | pipelines:
19 | metrics:
20 | receivers: [couchdb]
21 | processors: [batch]
22 |       exporters: [otlp]
23 |
--------------------------------------------------------------------------------
/collector/couchdb/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | couchdb:
5 | container_name: couchdb
6 | image: "couchdb:${APP_VERSION:-latest}"
7 | ports:
8 | - "${METRICS_PORT:-5984}:5984"
9 | - "${APP_PORT:-5986}:5986"
10 | environment:
11 | COUCHDB_USER: otelu
12 | COUCHDB_PASSWORD: otelp
13 | networks:
14 | - integrations
15 | otel-collector:
16 |     image: "otel/opentelemetry-collector-contrib:${COLLECTOR_VERSION:-latest}"
17 | command: ["--config=/conf/collector.yml"]
18 | environment:
19 | - LS_ACCESS_TOKEN
20 | volumes:
21 | - ./collector.yml:/conf/collector.yml:rw
22 | depends_on:
23 | - couchdb
24 | networks:
25 | - integrations
26 |
27 | networks:
28 | integrations:
29 |
--------------------------------------------------------------------------------
/collector/docker/dockerstats/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:11.7
2 |
3 | RUN apt update && apt -y install wget
4 | RUN wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.77.0/otelcol-contrib_0.77.0_linux_amd64.tar.gz
5 | RUN tar -xf otelcol-contrib_0.77.0_linux_amd64.tar.gz -C /
6 |
7 | ENTRYPOINT ["/otelcol-contrib"]
8 |
--------------------------------------------------------------------------------
/collector/docker/dockerstats/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | docker_stats:
3 | endpoint: "unix:///var/run/docker.sock"
4 | metrics:
5 | container.cpu.usage.percpu:
6 | enabled: true
7 |
8 | exporters:
9 | logging:
10 | loglevel: debug
11 | otlp/public:
12 | endpoint: ingest.lightstep.com:443
13 | headers:
14 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
15 |
16 | processors:
17 | batch:
18 |
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [docker_stats]
23 | processors: [batch]
24 | exporters: [logging, otlp/public]
25 | telemetry:
26 | logs:
27 | level: debug
28 |
--------------------------------------------------------------------------------
/collector/docker/dockerstats/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | container_name: otel
6 | build:
7 | context: ./
8 | dockerfile: ./Dockerfile
9 | environment:
10 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
11 | configs:
12 | - source: collector_conf
13 | target: /conf/collector.yml
14 | volumes:
15 | - /var/run/docker.sock:/var/run/docker.sock
16 | command: ["--config=/conf/collector.yml"]
17 |
18 | configs:
19 | collector_conf:
20 | file: ./collector.yml
21 |
--------------------------------------------------------------------------------
/collector/docker/prometheus/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/docker:
3 | config:
4 | scrape_configs:
5 | - job_name: docker-otel-eg
6 | static_configs:
7 | - targets: ["${DEFAULT_IPV4}:2375"]
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics:
23 | receivers: [prometheus/docker]
24 | processors: [batch]
25 | exporters: [logging, otlp/public]
26 | telemetry:
27 | logs:
28 | level: debug
29 |
--------------------------------------------------------------------------------
/collector/docker/prometheus/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | container_name: otel
6 | image: otel/opentelemetry-collector-contrib
7 | environment:
8 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
9 | DEFAULT_IPV4: ${DEFAULT_IPV4}
10 | configs:
11 | - source: collector_conf
12 | target: /conf/collector.yml
13 | command: ["--config=/conf/collector.yml"]
14 | networks:
15 | - integrations
16 |
17 | configs:
18 | collector_conf:
19 | file: ./collector.yml
20 |
21 | networks:
22 | integrations:
23 |
--------------------------------------------------------------------------------
/collector/elasticsearch/.env:
--------------------------------------------------------------------------------
1 | COMPOSE_PROJECT_NAME=elastic-lightstep
2 | ELASTIC_VERSION=8.2.2
3 |
4 | #----------- Resources --------------------------#
5 | ELASTICSEARCH_HEAP=1024m
6 |
7 | #----------- Hosts and Ports --------------------#
8 | # get hostnames from environment variables
9 | ELASTICSEARCH_HOST=elasticsearch
10 | ELASTICSEARCH_PORT=9200
11 |
12 | #----------- Credentials ------------------------#
13 | # Username & Password for Admin Elasticsearch cluster.
14 | # This is used to set the password at setup, and to connect at runtime.
15 | # USERNAME cannot be changed! It is set here for parameterization only.
16 | ELASTIC_USERNAME=elastic
17 | ELASTIC_PASSWORD=changeme
18 |
19 | #----------- Cluster ----------------------------#
20 | ELASTIC_CLUSTER_NAME=elastic-lightstep-cluster
21 | ELASTIC_INIT_MASTER_NODE=elastic-lightstep-node-0
22 | ELASTIC_NODE_NAME=elastic-lightstep-node-0
23 |
24 | # Hostnames of master eligible elasticsearch instances. (matches compose generated host name)
25 | ELASTIC_DISCOVERY_SEEDS=elasticsearch
26 |
27 |
--------------------------------------------------------------------------------
/collector/elasticsearch/.gitignore:
--------------------------------------------------------------------------------
1 | secrets
2 | statsout
3 | .hidden
4 |
5 |
--------------------------------------------------------------------------------
/collector/elasticsearch/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | elasticsearch:
3 | nodes: ["elasticsearch"]
4 | endpoint: "https://elasticsearch:9200"
5 | tls:
6 | ca_file: /elastic-ca.crt
7 | username: elastic
8 | password: changeme
9 |
10 | exporters:
11 | logging:
12 | loglevel: debug
13 | otlp/public:
14 | endpoint: ingest.lightstep.com:443
15 | headers:
16 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
17 |
18 | processors:
19 | batch:
20 |
21 | service:
22 | pipelines:
23 | metrics:
24 | receivers: [elasticsearch]
25 | processors: [batch]
26 | exporters: [otlp/public, logging]
27 |
--------------------------------------------------------------------------------
/collector/elasticsearch/docker-compose.override.yml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 | services:
3 | otel-collector:
4 | image: otel/opentelemetry-collector-contrib:latest
5 | environment:
6 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
7 | configs:
8 | - source: collector_conf
9 | target: /conf/collector.yml
10 | depends_on:
11 | elasticsearch:
12 | condition: service_healthy
13 | secrets:
14 | # you can make these certs with `make certs` or `make setup`
15 | # the secrets are defined in docker-compose.yml
16 | - source: elastic.ca
17 | target: /elastic-ca.crt
18 | - source: elasticsearch.certificate
19 | target: /elasticsearch.crt
20 | - source: elasticsearch.key
21 | target: /elasticsearch.key
22 | command: ["--config=/conf/collector.yml"]
23 |
24 | configs:
25 | collector_conf:
26 | file: ./collector.yml
27 |
--------------------------------------------------------------------------------
/collector/elasticsearch/docker-compose.setup.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | keystore:
5 | image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
6 | command: bash /setup/setup-keystore.sh
7 | user: "0"
8 | volumes:
9 | - ./secrets:/secrets
10 | - ./setup/:/setup/
11 | environment:
12 | ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
13 |
14 | certs:
15 | image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
16 | command: bash /setup/setup-certs.sh
17 | user: "0"
18 | volumes:
19 | - ./secrets:/secrets
20 | - ./setup/:/setup
21 |
--------------------------------------------------------------------------------
/collector/elasticsearch/setup/instances.yml:
--------------------------------------------------------------------------------
1 | instances:
2 | - name: elasticsearch
3 | dns:
4 | - elasticsearch
5 | - localhost
6 | ip:
7 | - 127.0.0.1
8 |
9 | - name: kibana
10 | dns:
11 | - kibana
12 | - localhost
13 | ip:
14 | - 127.0.0.1
--------------------------------------------------------------------------------
/collector/elasticsearch/setup/keystore.sh:
--------------------------------------------------------------------------------
1 | # Exit on Error
2 | set -e
3 |
4 | # Setting Bootstrap Password
5 | echo "Setting bootstrap.password..."
6 | (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password')
7 |
8 | # ----- Setting Secrets
9 |
10 | ## Add Additional Config
11 | # 1- Copy the below commented block, uncomment it, and replace , , and .
12 | # 2- Pass to setup container in `docker-compose-setup.yml`
13 |
14 | ## Setting
15 | #echo "Setting ..."
16 | #(echo "$" | elasticsearch-keystore add -x '')
17 |
18 |
19 | # ----- Setting S3 Secrets
20 |
21 | ## Setting S3 Access Key
22 | #echo "Setting S3 Access Key..."
23 | #(echo "$AWS_ACCESS_KEY_ID" | elasticsearch-keystore add -x 's3.client.default.access_key')
24 | #
25 | ## Setting S3 Secret Key
26 | #echo "Setting S3 Secret Key..."
27 | #(echo "$AWS_SECRET_ACCESS_KEY" | elasticsearch-keystore add -x 's3.client.default.secret_key')
--------------------------------------------------------------------------------
/collector/envoy/Dockerfile-frontenvoy:
--------------------------------------------------------------------------------
1 | FROM envoyproxy/envoy-dev:latest
2 |
3 | RUN apt-get update \
4 | && apt-get install --no-install-recommends -y curl \
5 | && apt-get autoremove -y \
6 | && apt-get clean \
7 | && rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/*
8 | COPY ./front-envoy.yaml /etc/front-envoy.yaml
9 | RUN chmod go+r /etc/front-envoy.yaml
10 | CMD ["/usr/local/bin/envoy", "-c", "/etc/front-envoy.yaml", "--service-cluster", "front-proxy"]
11 |
--------------------------------------------------------------------------------
/collector/envoy/app/flask/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim-bullseye
2 |
3 | ADD requirements.txt /tmp/flask-requirements.txt
4 | RUN pip3 install -r /tmp/flask-requirements.txt
5 | RUN mkdir /code
6 |
7 | ENTRYPOINT ["python3", "/code/service.py"]
8 |
--------------------------------------------------------------------------------
/collector/envoy/app/flask/requirements.in:
--------------------------------------------------------------------------------
1 | flask
2 |
--------------------------------------------------------------------------------
/collector/envoy/app/tracing/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM flask_service:python-3.10-slim-bullseye
2 |
3 | # envoy v1.23.0
4 | COPY --from=envoyproxy/envoy-dev:latest /usr/local/bin/envoy /usr/local/bin/envoy
5 |
6 | ADD requirements.txt /tmp/requirements.txt
7 | RUN pip3 install -r /tmp/requirements.txt
8 |
9 | ADD ./service.py /code/service.py
10 |
11 | ADD ./start_service.sh /usr/local/bin/start_service.sh
12 | RUN chmod u+x /usr/local/bin/start_service.sh
13 | ENTRYPOINT ["/usr/local/bin/start_service.sh"]
14 |
--------------------------------------------------------------------------------
/collector/envoy/app/tracing/requirements.in:
--------------------------------------------------------------------------------
1 | requests
2 |
--------------------------------------------------------------------------------
/collector/envoy/app/tracing/start_service.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | python3 /code/service.py &
3 | envoy -c /etc/service-envoy.yaml --service-cluster "service${SERVICE_NAME}"
4 |
--------------------------------------------------------------------------------
/collector/envoy/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | otlp:
3 | protocols:
4 | http:
5 | grpc:
6 | prometheus/front-proxy:
7 | config:
8 | scrape_configs:
9 | - job_name: otel-envoy-eg
10 | scrape_interval: 5s
11 | metrics_path: /stats/prometheus
12 | static_configs:
13 | - targets: ["front-envoy:8001"]
14 |
15 | exporters:
16 | logging:
17 | loglevel: debug
18 | otlp/public:
19 | endpoint: ingest.lightstep.com:443
20 | headers:
21 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
22 |
23 | processors:
24 | batch:
25 |
26 | service:
27 | pipelines:
28 | traces:
29 | receivers: [otlp]
30 | exporters: [logging, otlp/public]
31 | metrics:
32 | receivers: [otlp, prometheus/front-proxy]
33 | processors: [batch]
34 | exporters: [otlp/public]
35 |
--------------------------------------------------------------------------------
/collector/envoy/docker-compose.override.yaml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 | services:
3 | otel-collector:
4 | # container_name: otel-collector
5 | image: otel/opentelemetry-collector-contrib:0.51.0
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | configs:
9 | - source: collector_conf
10 | target: /conf/collector.yml
11 | command: ["--config=/conf/collector.yml"]
12 |
13 | # we can send some traffic to the proxy, so charts have data by enabling the
14 | # profile like `docker-compose --profile loadgen up`
15 | loadgen:
16 | depends_on: [front-envoy]
17 | image: williamyeh/hey
18 | command: ["-z", "10m", "-c", "50", "-q", "50", "http://front-envoy:8080/service/1"]
19 | # to keep this from starting under regular invocation
20 | profiles:
21 | - loadgen
22 |
23 | configs:
24 | collector_conf:
25 | file: ./collector.yml
26 |
--------------------------------------------------------------------------------
/collector/envoy/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 | services:
3 |
4 | # base images
5 | flask_service:
6 | build:
7 | context: ./app/flask
8 | image: flask_service:python-3.10-slim-bullseye
9 | restart: "no"
10 | deploy:
11 | replicas: 0
12 |
13 | # front-proxy
14 | front-envoy:
15 | build:
16 | context: .
17 | dockerfile: Dockerfile-frontenvoy
18 | ports:
19 | - "8080:8080"
20 | - "8443:8443"
21 | - "8001:8001"
22 |
23 | service1:
24 | build:
25 | context: ./app/tracing
26 | volumes:
27 | - ./service-envoy.yaml:/etc/service-envoy.yaml
28 | environment:
29 | - SERVICE_NAME=1
30 |
31 | service2:
32 | build:
33 | context: ./app/tracing
34 | volumes:
35 | - ./service-envoy.yaml:/etc/service-envoy.yaml
36 | environment:
37 | - SERVICE_NAME=2
38 |
--------------------------------------------------------------------------------
/collector/etcd/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: build etcd from source.
FROM golang:1.19-rc-alpine3.15 as builder

# NOTE(review): ETCD_VERSION is declared but never referenced below — the
# build checks out a pinned commit instead; confirm whether this ARG should
# drive the checkout.
ARG ETCD_VERSION=3.5.4

RUN apk --no-cache add ca-certificates git make zsh bash

WORKDIR /usr/local/src

# Pinned commit for a reproducible build.
RUN git clone https://github.com/etcd-io/etcd \
&& cd etcd && git checkout 08407ff7600eb16c4445d5f21c4fafaf19412e24 \
&& make

# Stage 2: minimal runtime image with just the binaries and shell tooling.
FROM alpine:latest

ARG ETCD_NODE_NAME=$ETCD_NODE_NAME

RUN apk --no-cache add zsh sed curl vim

COPY --from=builder /usr/local/src/etcd/bin/etcd /usr/local/bin/etcd
COPY --from=builder /usr/local/src/etcd/bin/etcdctl /usr/local/bin/etcdctl
COPY start-etcd.sh /opt/start-etcd.sh
COPY etcd.conf /etc/etcd.conf

RUN chmod u+x /opt/start-etcd.sh

# 5050: prometheus metrics, 2379: client API, 2380: peer traffic.
EXPOSE 5050 2379 2380

ENTRYPOINT ["/opt/start-etcd.sh"]
29 |
--------------------------------------------------------------------------------
/collector/etcd/README.md:
--------------------------------------------------------------------------------
1 | ETCD and OTEL collector: an etcd cluster and a single-node etcd docker-compose are provided; they can be run as
2 | `docker compose -f docker-compose.cluster.yaml up` or `docker compose -f docker-compose.single.yaml up`
3 | respectively.
4 |
--------------------------------------------------------------------------------
/collector/etcd/config-prometheus.cluster.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/collectd:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | scrape_configs:
7 | - job_name: 'component-scraper'
8 | scrape_interval: 5s
9 | metrics_path: "/metrics"
10 | static_configs:
11 | - targets: ["us-east:5050", "us-west:5050", "ap-south:5050", "af-south:5050"]
12 | exporters:
13 | logging:
14 | loglevel: debug
15 | otlp:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [prometheus/collectd]
23 | exporters: [logging, otlp]
24 |
--------------------------------------------------------------------------------
/collector/etcd/config-prometheus.single.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/collectd:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | scrape_configs:
7 | - job_name: 'component-scraper'
8 | scrape_interval: 5s
9 | metrics_path: "/metrics"
10 | static_configs:
11 | - targets: ["single:5050"]
12 | exporters:
13 | logging:
14 | loglevel: debug
15 | otlp:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [prometheus/collectd]
23 | exporters: [logging, otlp]
24 |
--------------------------------------------------------------------------------
/collector/etcd/docker-compose.single.yaml:
--------------------------------------------------------------------------------
1 | x-variables:
2 | env_etcd_single_node: &env_etcd_single_node 'FLAGS=--initial-cluster=single=http://single:2380'
3 | common_settings: &build_and_ports
4 | build: .
5 | ports:
6 | - 2379
7 | - 2380
8 | - 5050 # <- prometheus metrics port
9 | services:
10 | single:
11 | <<: *build_and_ports
12 | environment:
13 | - NAME=single
14 | - *env_etcd_single_node
15 | volumes:
16 | - localhost:/etcd_data
17 | otel-collector:
18 | container_name: otel-collector
19 | hostname: otel-collector
20 | image: otel/opentelemetry-collector-contrib:0.50.0
21 | command: ["--config=/conf/config-prometheus.yaml"]
22 | environment:
23 | - LS_ACCESS_TOKEN=${LS_ACCESS_TOKEN}
24 | volumes:
25 | - ./config-prometheus.single.yaml:/conf/config-prometheus.yaml:rw
26 |
27 | volumes:
28 | localhost:
29 |
--------------------------------------------------------------------------------
/collector/etcd/etcd.conf:
--------------------------------------------------------------------------------
1 | listen-metrics-urls: http://0.0.0.0:5050
2 |
--------------------------------------------------------------------------------
/collector/etcd/start-etcd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 |
3 | set -e -x
4 |
5 | FLAGS=${@}
6 |
7 | FLAGS=$(echo -n "${FLAGS} \n
8 | --name=${NAME} \n
9 | --initial-advertise-peer-urls=http://${NAME}:2380 \n
10 | --listen-peer-urls=http://0.0.0.0:2380 \n
11 | --listen-client-urls=http://0.0.0.0:2379 \n
12 | --advertise-client-urls=http://${NAME}:2379 \n
13 | --heartbeat-interval=250 \n
14 | --election-timeout=1250 \n
15 | --initial-cluster-state=new \n
16 | --listen-metrics-urls=http://0.0.0.0:5050"
17 | )
18 |
19 | echo -n "Running etcd ${FLAGS}"
20 | /usr/local/bin/etcd $(echo -n ${FLAGS})
21 |
--------------------------------------------------------------------------------
/collector/external-dns/Makefile:
--------------------------------------------------------------------------------
1 |
2 | all: setup install-operators
3 |
4 | setup: create-cluster add-repositories create-namespaces
5 | install-operators: install-etcd install-coredns install-external-dns install-ingress install-collector
6 |
7 | add-repositories:
8 | helm repo add coredns https://coredns.github.io/helm
9 | helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
10 |
11 | create-namespaces:
12 | kubectl create namespace etcd
13 |
14 | create-cluster:
15 | minikube start
16 | minikube addons enable ingress
17 |
18 | delete-cluster:
19 | minikube delete
20 |
21 | install-etcd:
22 | kubectl create -f etcd-cluster.yaml -n etcd
23 |
24 | install-coredns:
25 | helm install my-coredns coredns/coredns -f values-coredns.yaml
26 |
27 | install-external-dns:
28 | kubectl apply -f external-dns.yaml
29 |
30 | install-ingress:
31 | sleep 15
32 | kubectl apply -f ingress.yaml
33 |
34 | install-collector:
35 | ./install_collector.sh
36 |
--------------------------------------------------------------------------------
/collector/external-dns/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: nginx
5 | annotations:
6 | kubernetes.io/ingress.class: "nginx"
7 | spec:
8 | rules:
9 | - host: ls-collector.com
10 | http:
11 | paths:
12 | - pathType: Prefix
13 | path: /
14 | backend:
15 | service:
16 | name: nginx
17 | port:
18 | number: 80
19 |
--------------------------------------------------------------------------------
/collector/external-dns/install_collector.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | helm install my-collector open-telemetry/opentelemetry-collector -f values-collector.yaml -f - <> "$FLINK_HOME/conf/flink-conf.yaml"; \
7 | echo "metrics.reporter.prom.factory.class: org.apache.flink.metrics.prometheus.PrometheusReporterFactory" >> "$FLINK_HOME/conf/flink-conf.yaml"
8 | COPY --from=builder /home/gradle/build/libs/*.jar $FLINK_HOME/usrlib/
9 | RUN mkdir /state && chown flink:flink /state # workaround for https://github.com/docker/compose/issues/3270
10 |
--------------------------------------------------------------------------------
/collector/flink/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'flink'
6 | scrape_interval: 1s
7 | params:
8 | format: [ 'prometheus' ]
9 | scheme: 'http'
10 | tls_config:
11 | insecure_skip_verify: true
12 | static_configs:
13 | - targets: [ 'job-cluster:9249', 'taskmanager1:9249', 'taskmanager2:9249' ]
14 |
15 | exporters:
16 | logging:
17 | loglevel: debug
18 | otlp:
19 | endpoint: ingest.lightstep.com:443
20 | headers:
21 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
22 |
23 | service:
24 | pipelines:
25 | metrics:
26 | receivers: [ prometheus ]
27 | exporters: [ logging, otlp ]
28 | telemetry:
29 | logs:
30 | level: debug
31 |
--------------------------------------------------------------------------------
/collector/flink/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/collector/flink/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/collector/flink/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionBase=GRADLE_USER_HOME
2 | distributionPath=wrapper/dists
3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-bin.zip
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 |
--------------------------------------------------------------------------------
/collector/flink/src/integrationTest/java/com/github/mbode/flink_prometheus_example/GrafanaIT.java:
--------------------------------------------------------------------------------
1 | package com.github.mbode.flink_prometheus_example;
2 |
3 | import static org.awaitility.Awaitility.await;
4 |
5 | import com.mashape.unirest.http.Unirest;
6 | import org.junit.jupiter.api.Test;
7 |
8 | class GrafanaIT {
9 | private static final String GRAFANA_URL =
10 | "http://"
11 | + System.getProperty("grafana.host")
12 | + ":"
13 | + Integer.getInteger("grafana.tcp.3000")
14 | + "/";
15 |
16 | @Test
17 | void flinkDashboardHasBeenImported() {
18 | await()
19 | .until(
20 | () -> {
21 | final String responseBody =
22 | Unirest.get(GRAFANA_URL + "api/dashboards/uid/veLveEOiz")
23 | .basicAuth("admin", "flink")
24 | .asString()
25 | .getBody();
26 | return responseBody.contains("Flink");
27 | });
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/collector/flink/src/main/java/com/github/mbode/flink_prometheus_example/RandomSourceFunction.java:
--------------------------------------------------------------------------------
1 | package com.github.mbode.flink_prometheus_example;
2 |
3 | import java.util.concurrent.ThreadLocalRandom;
4 | import org.apache.flink.streaming.api.functions.source.SourceFunction;
5 |
6 | class RandomSourceFunction implements SourceFunction {
7 | private int count = 0;
8 | private volatile boolean isRunning = true;
9 | private int elements;
10 |
11 | RandomSourceFunction(int elements) {
12 | this.elements = elements;
13 | }
14 |
15 | public void run(SourceContext ctx) throws InterruptedException {
16 | while (isRunning && count < elements) {
17 | Thread.sleep(1);
18 | ctx.collect(ThreadLocalRandom.current().nextInt(10_000));
19 | count++;
20 | }
21 | }
22 |
23 | public void cancel() {
24 | isRunning = false;
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/collector/flink/src/test/java/com/github/mbode/flink_prometheus_example/CollectSink.java:
--------------------------------------------------------------------------------
1 | package com.github.mbode.flink_prometheus_example;
2 |
3 | import java.util.ArrayList;
4 | import java.util.List;
5 | import org.apache.flink.streaming.api.functions.sink.SinkFunction;
6 |
// Test sink that buffers every record it receives in a shared static list.
class CollectSink implements SinkFunction {
  // Shared across all sink instances and test runs.
  // NOTE(review): static mutable state — confirm tests clear it between runs.
  static final List values = new ArrayList<>();

  // Appends each incoming record to the shared buffer.
  @Override
  public void invoke(Integer value, Context context) {
    values.add(value);
  }
}
15 |
--------------------------------------------------------------------------------
/collector/flink/src/test/java/com/github/mbode/flink_prometheus_example/PrometheusExampleJobTest.java:
--------------------------------------------------------------------------------
1 | package com.github.mbode.flink_prometheus_example;
2 |
3 | import org.apache.flink.test.util.AbstractTestBase;
4 | import org.junit.jupiter.api.Test;
5 |
// Smoke test: the example job should run to completion on the local
// mini-cluster provided by AbstractTestBase.
class PrometheusExampleJobTest extends AbstractTestBase {
  @Test
  void jobRuns() throws Exception {
    // "--elements 7" keeps the run short; success means main() returned
    // without throwing.
    PrometheusExampleJob.main(new String[] {"--elements", "7"});
  }
}
12 |
--------------------------------------------------------------------------------
/collector/fluentd/collector-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: collector
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: collector-config
10 | namespace: collector
11 | data:
12 | collector.yml: |-
13 | receivers:
14 | prometheus:
15 | config:
16 | scrape_configs:
17 | - job_name: otel-fluentd
18 | static_configs:
19 | - targets: ["${FLUENTD_HOST}:24224"]
20 | exporters:
21 | logging:
22 | loglevel: debug
23 | otlp/public:
24 | endpoint: ingest.lightstep.com:443
25 | headers:
26 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
27 | processors:
28 | batch:
29 | service:
30 | pipelines:
31 | metrics/fluentd:
32 | receivers: [prometheus]
33 | processors: [batch]
34 | exporters: [logging, otlp/public]
35 |
--------------------------------------------------------------------------------
/collector/fluentd/collector.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: collector
5 | namespace: collector
6 | labels:
7 | k8s-app: collector
8 | spec:
9 | selector:
10 | matchLabels:
11 | k8s-app: collector
12 | template:
13 | metadata:
14 | labels:
15 | k8s-app: collector
16 | spec:
17 | containers:
18 | - name: collector
19 | imagePullPolicy: "IfNotPresent"
20 | image: otel/opentelemetry-collector-contrib:0.75.0
21 | env:
22 | - name: LS_ACCESS_TOKEN
23 | valueFrom:
24 | secretKeyRef:
25 | name: ls
26 | key: access_token
27 | - name: FLUENTD_HOST
28 | valueFrom:
29 | fieldRef:
30 | fieldPath: status.hostIP
31 | args: ["--config=/conf/collector.yml"]
32 | volumeMounts:
33 | - name: collector-config
34 | mountPath: /conf/
35 | terminationGracePeriodSeconds: 30
36 | volumes:
37 | - name: collector-config
38 | configMap:
39 | name: collector-config
--------------------------------------------------------------------------------
/collector/fluentd/fluentd-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: fluentd
5 | namespace: fluentd
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRole
9 | metadata:
10 | name: fluentd
11 | namespace: fluentd
12 | rules:
13 | - apiGroups:
14 | - ""
15 | resources:
16 | - pods
17 | - namespaces
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | ---
23 | kind: ClusterRoleBinding
24 | apiVersion: rbac.authorization.k8s.io/v1
25 | metadata:
26 | name: fluentd
27 | roleRef:
28 | kind: ClusterRole
29 | name: fluentd
30 | apiGroup: rbac.authorization.k8s.io
31 | subjects:
32 | - kind: ServiceAccount
33 | name: fluentd
34 | namespace: fluentd
--------------------------------------------------------------------------------
/collector/gitea/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/gitea:
3 | config:
4 | scrape_configs:
5 | - job_name: 'gitea'
6 | scrape_interval: 20s
7 | metrics_path: "/metrics"
8 | static_configs:
9 | - targets: ["gitea:3000"]
10 | exporters:
11 | logging:
12 | loglevel: debug
13 | otlp:
14 | endpoint: ingest.lightstep.com:443
15 | headers:
16 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
17 | service:
18 | pipelines:
19 | metrics:
20 | receivers: [prometheus/gitea]
21 | exporters: [logging, otlp]
22 |
--------------------------------------------------------------------------------
/collector/gitea/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.81.0
6 | hostname: otel-collector
7 | restart: always
8 | command: [ "--config=/conf/collector.yaml" ]
9 | volumes:
10 | - ./collector.yaml:/conf/collector.yaml:rw
11 | environment:
12 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
13 |
14 | gitea:
15 | image: gitea/gitea:1.20.2
16 | hostname: gitea
17 | restart: always
18 | volumes:
19 | - gitea:/data
20 | - ./app.ini:/data/gitea/conf/app.ini
21 | - /etc/timezone:/etc/timezone:ro
22 | - /etc/localtime:/etc/localtime:ro
23 | ports:
24 | - "3000:3000"
25 | - "222:22"
26 | environment:
27 | - GITEA__metrics__ENABLED=true
28 |
29 | volumes:
30 | gitea:
31 | driver: local
32 |
--------------------------------------------------------------------------------
/collector/grafana/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'grafana_metrics'
6 | scrape_interval: 15s
7 | scrape_timeout: 5s
8 | params:
9 | format: [ 'prometheus' ]
10 | scheme: 'http'
11 | tls_config:
12 | insecure_skip_verify: true
13 | static_configs:
14 | - targets: [ 'grafana:3000' ]
15 |
16 | exporters:
17 | logging:
18 | verbosity: detailed
19 | otlp:
20 | endpoint: ingest.lightstep.com:443
21 | headers:
22 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
23 |
24 | processors:
25 | batch:
26 |
27 | service:
28 | pipelines:
29 | metrics:
30 | receivers: [ prometheus ]
31 | processors: [ batch ]
32 | exporters: [ logging, otlp ]
33 |
34 |
--------------------------------------------------------------------------------
/collector/grafana/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 | evaluation_interval: 15s
4 | external_labels:
5 | monitor: 'local'
6 |
7 |
8 | scrape_configs:
9 |
10 | - job_name: 'grafana_metrics'
11 | scrape_interval: 15s
12 | static_configs:
13 | - targets: [ 'grafana:3000' ]
14 |
--------------------------------------------------------------------------------
/collector/grafana/provisioning/dashboards/default.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 | - name: "Dashboard provider"
5 | orgId: 1
6 | type: file
7 | disableDeletion: false
8 | updateIntervalSeconds: 10
9 | allowUiUpdates: false
10 | options:
11 | path: /etc/grafana/provisioning/dashboards/
12 | foldersFromFilesStructure: true
13 |
--------------------------------------------------------------------------------
/collector/grafana/provisioning/datasources/default.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | datasources:
4 | - name: Prometheus
5 | uid: P4169E866C3094E38
6 | type: prometheus
7 | url: http://prometheus:9090
8 | editable: true
9 | isDefault: true
10 |
--------------------------------------------------------------------------------
/collector/gunicorn/README.md:
--------------------------------------------------------------------------------
1 | Gunicorn can emit StatsD metrics, which the Collector's statsd receiver ingests.
2 |
3 | Run `docker-compose up`
4 | Visit `http://localhost:5000` to generate sample metrics.
5 |
--------------------------------------------------------------------------------
/collector/gunicorn/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | statsd:
3 | endpoint: "otel-collector:8125"
4 | aggregation_interval: 70s
5 | enable_metric_type: true
6 | is_monotonic_counter: false
7 | timer_histogram_mapping:
8 | - statsd_type: "histogram"
9 | observer_type: "summary"
10 | - statsd_type: "timing"
11 |
12 | exporters:
13 | logging:
14 | loglevel: debug
15 | otlp:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [statsd]
23 | exporters: [logging, otlp]
24 | telemetry:
25 | logs:
26 | level: debug
27 |
--------------------------------------------------------------------------------
/collector/gunicorn/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.64.0
6 | hostname: otel-collector
7 | restart: always
8 | command: [ "--config=/conf/collector.yaml" ]
9 | ports:
10 | - "8125:8125/udp"
11 | volumes:
12 | - ./collector.yaml:/conf/collector.yaml:rw
13 | environment:
14 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
15 | web:
16 | build: ./web
17 | command: gunicorn --bind 0.0.0.0:5000 main:app --statsd-host otel-collector:8125
18 | ports:
19 | - "5000:5000"
20 | depends_on:
21 | - otel-collector
22 |
--------------------------------------------------------------------------------
/collector/gunicorn/web/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.10.7-slim-buster

WORKDIR /usr/src/app

# Don't write .pyc files and don't buffer stdout/stderr — container-friendly
# logging. (ENV key=value is the current, non-deprecated instruction form.)
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Install dependencies before copying the app so Docker layer caching
# survives code-only changes; --no-cache-dir keeps the image smaller.
RUN pip install --no-cache-dir --upgrade pip
COPY ./requirements.txt /usr/src/app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Application code.
COPY . /usr/src/app/
13 |
--------------------------------------------------------------------------------
/collector/gunicorn/web/main.py:
--------------------------------------------------------------------------------
"""Minimal Flask app for the gunicorn/StatsD Collector example."""
from flask import Flask, jsonify


app = Flask(__name__)


@app.route("/")
def hello_world():
    """Serve a trivial JSON payload; per the README, visiting this endpoint
    generates the example's sample metrics."""
    return jsonify(hello="world")
10 |
--------------------------------------------------------------------------------
/collector/gunicorn/web/requirements.txt:
--------------------------------------------------------------------------------
1 | click==8.1.3
2 | Flask==2.3.2
3 | gunicorn==20.1.0
4 | importlib-metadata==5.0.0
5 | itsdangerous==2.1.2
6 | Jinja2==3.1.2
7 | MarkupSafe==2.1.1
8 | Werkzeug==3.0.1
9 | zipp==3.10.0
10 |
--------------------------------------------------------------------------------
/collector/hadoop/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 |
--------------------------------------------------------------------------------
/collector/hadoop/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: download the OpenTelemetry JMX metric-gatherer jar and the
# contrib Collector release using a curl-equipped image.
FROM curlimages/curl:7.82.0 as curler
ARG JMX_JAR_VERSION=v1.14.0
USER root
RUN curl -L \
--output /opentelemetry-jmx-metrics.jar \
"https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/${JMX_JAR_VERSION}/opentelemetry-jmx-metrics.jar"

RUN curl -L -s \
"https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.53.0/otelcol-contrib_0.53.0_linux_amd64.tar.gz" | \
tar -xvz -C /

# Stage 2: run the Collector on a JRE base — the jmx receiver launches the
# metric-gatherer jar, so a local Java runtime is required.
FROM ibmjava:8-jre
WORKDIR /

COPY --from=curler /opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar
COPY --from=curler /otelcol-contrib /otelcol-contrib

ENTRYPOINT [ "/otelcol-contrib" ]
CMD ["--config", "/etc/otel/config.yaml"]
--------------------------------------------------------------------------------
/collector/hadoop/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | jmx/hadoop:
3 | jar_path: /opt/opentelemetry-jmx-metrics.jar
4 | endpoint: namenode:8004
5 | target_system: jvm,hadoop
6 | collection_interval: 3s
7 |
8 | processors:
9 | batch:
10 |
11 | exporters:
12 | logging:
13 | loglevel: debug
14 | otlp:
15 | endpoint: ingest.lightstep.com:443
16 | headers:
17 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
18 |
19 | service:
20 | telemetry:
21 | logs:
22 | level: "debug"
23 | pipelines:
24 | metrics:
25 | receivers: [jmx/hadoop]
26 | processors: [batch]
27 | exporters: [logging,otlp]
28 |
--------------------------------------------------------------------------------
/collector/hadoop/conf/hadoop-env.sh:
--------------------------------------------------------------------------------
1 |
2 | export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
3 | # Set JMX options
4 | export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8004 $HDFS_NAMENODE_OPTS"
5 | export HDFS_DATANODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8006 $HDFS_DATANODE_OPTS"
--------------------------------------------------------------------------------
/collector/hadoop/conf/yarn-env.sh:
--------------------------------------------------------------------------------
1 | # Set JMX options
2 | export YARN_RESOURCEMANAGER_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8002 $YARN_RESOURCEMANAGER_OPTS"
3 | export YARN_NODEMANAGER_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=8002 $YARN_NODEMANAGER_OPTS"
--------------------------------------------------------------------------------
/collector/hadoop/submit/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: fetch the example MapReduce jar.
# NOTE(review): JMX_JAR_VERSION is declared but unused in this Dockerfile.
FROM curlimages/curl:7.82.0 as hadoop_curl
ARG JMX_JAR_VERSION=v1.14.0
USER root

# NOTE(review): this downloads the -sources jar, not the compiled artifact —
# confirm WordCount actually runs from it at submit time.
RUN curl -L \
--output /hadoop-mapreduce-examples-2.7.1-sources.jar \
"https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-examples/2.7.1/hadoop-mapreduce-examples-2.7.1-sources.jar"

# Stage 2: hadoop client image that submits the job via run.sh.
FROM bde2020/hadoop-base:2.0.0-hadoop3.2.1-java8

COPY --from=hadoop_curl /hadoop-mapreduce-examples-2.7.1-sources.jar /opt/hadoop/applications/hadoop-mapreduce-examples-2.7.1-sources.jar

# Defaults consumed by run.sh; override at `docker run` time if needed.
ENV JAR_FILEPATH="/opt/hadoop/applications/hadoop-mapreduce-examples-2.7.1-sources.jar"
ENV CLASS_TO_RUN="org.apache.hadoop.examples.WordCount"
ENV PARAMS="/input /output"

ADD run.sh /run.sh
RUN chmod a+x /run.sh

CMD ["/run.sh"]
--------------------------------------------------------------------------------
/collector/hadoop/submit/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | $HADOOP_HOME/bin/hadoop jar $JAR_FILEPATH $CLASS_TO_RUN $PARAMS
--------------------------------------------------------------------------------
/collector/haproxy/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'haproxy'
6 | scrape_interval: 10s
7 | static_configs:
8 | - targets: ['haproxy:8404']
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics:
23 | receivers: [prometheus]
24 | processors: [batch]
25 | exporters: [logging, otlp]
26 |
--------------------------------------------------------------------------------
/collector/haproxy/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | {{.AppID}}:
5 | image: {{.Image}}:{{.ImageVersion}}
6 | ports:
7 | {{range $port := .Ports}}- {{$port.No}}:{{if $port.Expose}}:{{$port.No}}
8 | {{end}}
9 | {{if .Config}}volumes:
10 | - {{.Config.Source}}:{{.Config.Target}}:ro{{end}}
11 | {{if .Depends}}depends_on:
12 | {{range $dep := .Depends}}- {{$dep}}
13 | {{end}}
14 | {{if .Command}}command: {{.Command}}{{end}}
15 | {{if .Healthcheck}}healthcheck:
16 | {{.Healthcheck}}{{end}}
17 | networks:
18 | - integrations
19 |
20 | # for receivers that scrape this Collector block should be exactly like this
21 | otel-collector:
22 | image: otel/opentelemetry-collector-contrib:{{.CollectorVersion}}
23 | command: ["--config=/conf/collector.yml"]
24 | environment:
25 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
26 | volumes:
27 | - ./collector.yml:/conf/collector.yml:ro
28 | networks:
29 | - integrations
30 |
31 | networks:
32 | integrations:
33 |
--------------------------------------------------------------------------------
/collector/haproxy/haproxy.cfg:
--------------------------------------------------------------------------------
1 | global
2 | log stdout format raw local0 info
3 |
4 | defaults
5 | mode http
6 | timeout client 10s
7 | timeout connect 5s
8 | timeout server 10s
9 | timeout http-request 10s
10 | log global
11 |
12 | frontend stats
13 | bind *:8404
14 | http-request use-service prometheus-exporter if { path /metrics }
15 | stats enable
16 | stats uri /
17 | stats refresh 10s
18 |
19 | frontend myfrontend
20 | bind :80
21 | default_backend webservers
22 |
23 | backend webservers
24 | server s1 haproxy_echoserver_1:5678 check
25 | server s2 haproxy_echoserver_2:5678 check
26 | server s3 haproxy_echoserver_3:5678 check
27 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/Chart.yaml:
--------------------------------------------------------------------------------
1 | name: k8s
2 | description: A generated Helm Chart for k8s from Skippbox Kompose
3 | version: 0.0.1
4 | apiVersion: v1
5 | keywords:
6 | - k8s
7 | sources:
8 | home:
9 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/README.md:
--------------------------------------------------------------------------------
1 | This chart was created by Kompose
2 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/echoserver-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | annotations:
5 | kompose.cmd: kompose convert --chart --out k8s
6 | kompose.version: 1.28.0 (HEAD)
7 | creationTimestamp: null
8 | labels:
9 | io.kompose.service: echoserver
10 | name: echoserver
11 | spec:
12 | replicas: 3
13 | selector:
14 | matchLabels:
15 | io.kompose.service: echoserver
16 | strategy: {}
17 | template:
18 | metadata:
19 | annotations:
20 | kompose.cmd: kompose convert --chart --out k8s
21 | kompose.version: 1.28.0 (HEAD)
22 | creationTimestamp: null
23 | labels:
24 | io.kompose.network/haproxy-integrations: "true"
25 | io.kompose.service: echoserver
26 | spec:
27 | containers:
28 | - args:
29 | - -text
30 | - hello world
31 | image: hashicorp/http-echo:latest
32 | name: echoserver
33 | resources: {}
34 | restartPolicy: Always
35 | status: {}
36 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/haproxy-claim0-persistentvolumeclaim.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | io.kompose.service: haproxy-claim0
7 | name: haproxy-claim0
8 | spec:
9 | accessModes:
10 | - ReadOnlyMany
11 | resources:
12 | requests:
13 | storage: 100Mi
14 | status: {}
15 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/haproxy-integrations-networkpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | creationTimestamp: null
5 | name: haproxy-integrations
6 | spec:
7 | ingress:
8 | - from:
9 | - podSelector:
10 | matchLabels:
11 | io.kompose.network/haproxy-integrations: "true"
12 | podSelector:
13 | matchLabels:
14 | io.kompose.network/haproxy-integrations: "true"
15 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/haproxy-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | kompose.cmd: kompose convert --chart --out k8s
6 | kompose.version: 1.28.0 (HEAD)
7 | creationTimestamp: null
8 | labels:
9 | io.kompose.service: haproxy
10 | name: haproxy
11 | spec:
12 | ports:
13 | - name: "8080"
14 | port: 8080
15 | targetPort: 80
16 | - name: "8404"
17 | port: 8404
18 | targetPort: 8404
19 | selector:
20 | io.kompose.service: haproxy
21 | status:
22 | loadBalancer: {}
23 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/loadgen-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | annotations:
5 | kompose.cmd: kompose convert --chart --out k8s
6 | kompose.version: 1.28.0 (HEAD)
7 | creationTimestamp: null
8 | labels:
9 | io.kompose.service: loadgen
10 | name: loadgen
11 | spec:
12 | replicas: 1
13 | selector:
14 | matchLabels:
15 | io.kompose.service: loadgen
16 | strategy: {}
17 | template:
18 | metadata:
19 | annotations:
20 | kompose.cmd: kompose convert --chart --out k8s
21 | kompose.version: 1.28.0 (HEAD)
22 | creationTimestamp: null
23 | labels:
24 | io.kompose.network/haproxy-integrations: "true"
25 | io.kompose.service: loadgen
26 | spec:
27 | containers:
28 | - args:
29 | - -t12
30 | - -c400
31 | - -d10m
32 | - http://haproxy/foo
33 | image: williamyeh/wrk
34 | name: wrk
35 | resources: {}
36 | restartPolicy: Always
37 | status: {}
38 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/otel-collector-claim0-persistentvolumeclaim.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | creationTimestamp: null
5 | labels:
6 | io.kompose.service: otel-collector-claim0
7 | name: otel-collector-claim0
8 | spec:
9 | accessModes:
10 | - ReadOnlyMany
11 | resources:
12 | requests:
13 | storage: 100Mi
14 | status: {}
15 |
--------------------------------------------------------------------------------
/collector/haproxy/k8s/templates/otel-collector-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | kompose.cmd: kompose convert --chart --out k8s
6 | kompose.version: 1.28.0 (HEAD)
7 | creationTimestamp: null
8 | labels:
9 | io.kompose.service: otel-collector
10 | name: otel-collector
11 | spec:
12 | ports:
13 | - name: "8888"
14 | port: 8888
15 | targetPort: 8888
16 | selector:
17 | io.kompose.service: otel-collector
18 | status:
19 | loadBalancer: {}
20 |
--------------------------------------------------------------------------------
/collector/hbase/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 |
--------------------------------------------------------------------------------
/collector/hbase/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM curlimages/curl:7.82.0 as curler
2 | ARG JMX_JAR_VERSION=v1.14.0
3 | USER root
4 | RUN curl -L \
5 | --output /opentelemetry-jmx-metrics.jar \
6 | "https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/${JMX_JAR_VERSION}/opentelemetry-jmx-metrics.jar"
7 |
8 | RUN curl -L -s \
9 | "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.53.0/otelcol-contrib_0.53.0_linux_amd64.tar.gz" | \
10 | tar -xvz -C /
11 |
12 | FROM ibmjava:8-jre
13 | WORKDIR /
14 |
15 | COPY --from=curler /opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar
16 | COPY --from=curler /otelcol-contrib /otelcol-contrib
17 |
18 | ENTRYPOINT [ "/otelcol-contrib" ]
19 | CMD ["--config", "/etc/otel/config.yaml"]
--------------------------------------------------------------------------------
/collector/hbase/README.md:
--------------------------------------------------------------------------------
1 | [](https://gitter.im/big-data-europe/Lobby)
2 |
3 | # docker-hbase
4 |
5 | # Standalone
6 | To run standalone hbase:
7 | ```
8 | docker-compose -f docker-compose.yml up -d
9 | ```
10 | The deployment is the same as in [quickstart HBase documentation](https://hbase.apache.org/book.html#quickstart).
11 | Can be used for testing/development, connected to Hadoop cluster.
12 |
13 | This deployment will start Zookeeper, HMaster and HRegionserver in separate containers.
14 |
--------------------------------------------------------------------------------
/collector/hbase/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | jmx/hbase:
3 | jar_path: /opt/opentelemetry-jmx-metrics.jar
4 | endpoint: hbase:10101
5 | target_system: jvm,hbase
6 | collection_interval: 3s
7 |
8 | exporters:
9 | logging:
10 | loglevel: debug
11 | otlp:
12 | endpoint: ingest.lightstep.com:443
13 | headers:
14 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
15 |
16 | processors:
17 | batch:
18 |
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [jmx/hbase]
23 | processors: [batch]
24 | exporters: [logging, otlp]
--------------------------------------------------------------------------------
/collector/hbase/hbase.env:
--------------------------------------------------------------------------------
1 | HBASE_CONF_hbase_rootdir=hdfs://namenode:9000/hbase
2 | HBASE_CONF_hbase_cluster_distributed=false
3 | HBASE_CONF_hbase_zookeeper_property_dataDir=/zookeeper-data
4 | HBASE_CONF_hbase_zookeeper_quorum=hbase
5 |
6 | HBASE_CONF_hbase_master_port=16000
7 | HBASE_CONF_hbase_master_info_port=16010
8 | HBASE_CONF_hbase_regionserver_port=16020
9 | HBASE_CONF_hbase_regionserver_info_port=16030
10 | HBASE_CONF_hbase_zookeeper_peerport=2888
11 | HBASE_CONF_hbase_zookeeper_leaderport=3888
12 | HBASE_CONF_hbase_zookeeper_property_clientPort=2181
13 |
--------------------------------------------------------------------------------
/collector/hostmetrics/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | hostmetrics:
3 | scrapers:
4 | cpu:
5 | disk:
6 | paging:
7 | load:
8 | process:
9 | processes:
10 | filesystem:
11 | network:
12 | memory:
13 | scrape_interval: 5s
14 |   collection_interval: 5s
15 |
16 | processors:
17 | resourcedetection:
18 | detectors: [system]
19 | batch:
20 |
21 | exporters:
22 | logging:
23 | loglevel: debug
24 | # configuring otlp to Cloud Observability
25 | otlp:
26 | endpoint: ingest.lightstep.com:443
27 | headers:
28 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
29 |
30 | service:
31 | telemetry:
32 | metrics:
33 | pipelines:
34 | metrics:
35 | receivers: [hostmetrics]
36 | processors: [resourcedetection, batch]
37 | exporters: [logging, otlp]
38 |
--------------------------------------------------------------------------------
/collector/hostmetrics/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 | services:
3 | otel-collector:
4 | container_name: otel-collector
5 | image: otel/opentelemetry-collector-contrib:0.53.0
6 | command: ["--config=/conf/collector.yml"]
7 | environment:
8 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
9 | network_mode: "host"
10 | volumes:
11 | - ./collector.yml:/conf/collector.yml:rw
12 | networks:
13 | integrations:
14 |
--------------------------------------------------------------------------------
/collector/httpcheck/collector.yml:
--------------------------------------------------------------------------------
1 | exporters:
2 | logging:
3 | loglevel: debug
4 | otlp/lightstep:
5 | endpoint: ingest.lightstep.com:443
6 | headers:
7 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
8 |
9 | processors:
10 | batch:
11 |
12 | receivers:
13 | httpcheck/webserver:
14 | endpoint: http://webserver
15 | collection_interval: 5s
16 |
17 | service:
18 | pipelines:
19 | metrics:
20 | receivers: [httpcheck/webserver]
21 | processors: [batch]
22 | exporters: [logging, otlp/lightstep]
23 |
--------------------------------------------------------------------------------
/collector/httpcheck/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 | webserver:
4 | container_name: webserver
5 | image: nginx
6 | ports:
7 | - '8080:80'
8 | networks:
9 | - integrations
10 | stop_grace_period: 1s
11 | otel-collector:
12 | container_name: otel-collector
13 | image: otel/opentelemetry-collector-contrib:0.77.0
14 | command: ["--config=/conf/collector.yml"]
15 | environment:
16 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
17 | networks:
18 | - integrations
19 | volumes:
20 | - ./collector.yml:/conf/collector.yml:ro
21 | networks:
22 | integrations:
23 |
--------------------------------------------------------------------------------
/collector/ibmmq/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/ibmmq:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | scrape_configs:
7 | - job_name: 'ibmq-scraper'
8 | scrape_interval: 5s
9 | metrics_path: "/metrics"
10 | static_configs:
11 | - targets: ["ibmmq-leader:9157"]
12 | exporters:
13 | logging:
14 | loglevel: debug
15 | otlp:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
19 |
20 | service:
21 | pipelines:
22 | metrics:
23 | receivers: [prometheus/ibmmq]
24 | exporters: [logging, otlp]
25 |
--------------------------------------------------------------------------------
/collector/ibmmq/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.2"
2 | services:
3 | ibmmq-leader:
4 | image: ibmcom/mq:latest
5 | hostname: ibmmq-leader
6 | container_name: 'ibmmq-leader'
7 | environment:
8 | - MQ_DEV=true
9 | - MQ_ADMIN_PASSWORD=password
10 | - LICENSE=accept
11 | - MQ_APP_PASSWORD=password
12 | - MQ_ENABLE_METRICS=true
13 | volumes:
14 | - ./mqs.ini:/var/mqm/mqm
15 | ports:
16 | - 9157
17 | networks:
18 | - integrations
19 | otel-collector:
20 | container_name: otel-collector
21 | hostname: otel-collector
22 | image: otel/opentelemetry-collector-contrib:0.77.0
23 | command: ["--config=/conf/collector.yaml"]
24 | environment:
25 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
26 | networks:
27 | - integrations
28 | volumes:
29 | - ./collector.yaml:/conf/collector.yaml:rw
30 | networks:
31 | integrations:
32 | driver: bridge
33 |
--------------------------------------------------------------------------------
/collector/iis/Dockerfile_collector:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/windows/server:ltsc2022
2 |
3 | ADD ./collector.yml /collector.yml
4 |
5 | RUN powershell iwr -outf C:\collector.tar.gz https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.62.1/otelcol_0.62.1_windows_amd64.tar.gz
6 |
7 | RUN tar xvfz C:\collector.tar.gz
8 |
9 | ENV NO_WINDOWS_SERVICE=1
10 |
11 | ENTRYPOINT ["C:\\otelcol.exe", "--config=/collector.yml"]
12 |
--------------------------------------------------------------------------------
/collector/iis/Dockerfile_iis:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2022
2 |
3 | RUN powershell iwr -outf C:\exporter.exe https://github.com/prometheus-community/windows_exporter/releases/download/v0.20.0/windows_exporter-0.20.0-amd64.exe
4 |
5 | ENTRYPOINT ["C:\\exporter.exe", "--collectors.enabled=iis"]
6 |
--------------------------------------------------------------------------------
/collector/iis/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-iis
6 | static_configs:
7 | - targets: [iis:9182]
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics/iis:
23 | receivers: [prometheus]
24 | processors: [batch]
25 | exporters: [logging, otlp/public]
26 |
--------------------------------------------------------------------------------
/collector/iis/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | build:
6 | context: ./
7 | dockerfile: ./Dockerfile_collector
8 | environment:
9 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
10 | networks:
11 | - integrations
12 |
13 | iis:
14 | build:
15 | dockerfile: ./Dockerfile_iis
16 | ports:
17 | - "9182:9182"
18 | networks:
19 | - integrations
20 |
21 | networks:
22 | integrations:
23 |
--------------------------------------------------------------------------------
/collector/istio/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.17-alpine AS build
2 |
3 | WORKDIR /app
4 | COPY . .
5 | RUN go build -o app main.go
6 |
7 | FROM alpine:3.14
8 | COPY --from=build /app/app /app
9 | ENTRYPOINT ["/app"]
10 |
--------------------------------------------------------------------------------
/collector/istio/go-istio-demo.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: go-istio-demo
5 | labels:
6 | istio-injection: enabled
7 | ---
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | metadata:
11 | name: go-istio-demo
12 | namespace: go-istio-demo
13 | spec:
14 | replicas: 1
15 | selector:
16 | matchLabels:
17 | app: go-istio-demo
18 | template:
19 | metadata:
20 | labels:
21 | app: go-istio-demo
22 | spec:
23 | containers:
24 | - name: app
25 | image: go-istio-demo:latest
26 | imagePullPolicy: IfNotPresent
27 | ports:
28 | - containerPort: 8080
29 | ---
30 | apiVersion: v1
31 | kind: Service
32 | metadata:
33 | name: go-istio-demo
34 | namespace: go-istio-demo
35 | spec:
36 | selector:
37 | app: go-istio-demo
38 | ports:
39 | - protocol: TCP
40 | port: 80
41 | targetPort: 8080
42 | type: ClusterIP
43 |
--------------------------------------------------------------------------------
/collector/istio/go.mod:
--------------------------------------------------------------------------------
1 | module opentelementry-examples/istio
2 |
3 | go 1.19
4 |
--------------------------------------------------------------------------------
/collector/istio/istio-operator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: install.istio.io/v1alpha1
2 | kind: IstioOperator
3 | metadata:
4 | namespace: istio-system
5 | name: istiocontrolplane
6 | spec:
7 | profile: default
8 | components:
9 | prometheus:
10 | enabled: true
11 | values:
12 | global:
13 | proxy:
14 | autoInject: "enabled"
15 |
--------------------------------------------------------------------------------
/collector/istio/kind-config.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 | - role: worker
6 |
--------------------------------------------------------------------------------
/collector/istio/lightstep-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: lightstep-access-token
5 | type: Opaque
6 | stringData:
7 | access_token: ${LS_ACCESS_TOKEN}
8 |
--------------------------------------------------------------------------------
/collector/istio/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "net/http"
7 | )
8 |
9 | func main() {
10 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
11 | fmt.Fprintf(w, "Hello, Golang with Istio!")
12 | log.Println("Request received")
13 | })
14 |
15 | log.Println("Starting server on port 8080")
16 | log.Fatal(http.ListenAndServe(":8080", nil))
17 | }
18 |
--------------------------------------------------------------------------------
/collector/istio/otel-collector-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: otel-collector
5 | labels:
6 | app: otel-collector
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: otel-collector
12 | template:
13 | metadata:
14 | labels:
15 | app: otel-collector
16 | spec:
17 | serviceAccountName: otel-collector
18 | containers:
19 | - name: otel-collector
20 | image: otel/opentelemetry-collector-contrib:latest
21 | args:
22 | - "--config=/conf/otel-collector-config.yaml"
23 | ports:
24 | - containerPort: 55681
25 | env:
26 | - name: LS_ACCESS_TOKEN
27 | valueFrom:
28 | secretKeyRef:
29 | name: lightstep-access-token
30 | key: access_token
31 | volumeMounts:
32 | - name: otel-collector-config-vol
33 | mountPath: /conf
34 | volumes:
35 | - configMap:
36 | name: otel-collector-conf
37 | name: otel-collector-config-vol
38 |
--------------------------------------------------------------------------------
/collector/istio/otel-collector-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: otel-collector
5 | namespace: default
6 |
7 | ---
8 |
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRole
11 | metadata:
12 | name: otel-collector
13 | rules:
14 | - apiGroups: [""]
15 | resources:
16 | - nodes
17 | - nodes/metrics
18 | - nodes/proxy
19 | - nodes/stats
20 | - pods
21 | - services
22 | verbs: ["get", "list", "watch"]
23 | - nonResourceURLs: ["/metrics"]
24 | verbs: ["get"]
25 |
26 | ---
27 |
28 | apiVersion: rbac.authorization.k8s.io/v1
29 | kind: ClusterRoleBinding
30 | metadata:
31 | name: otel-collector
32 | roleRef:
33 | apiGroup: rbac.authorization.k8s.io
34 | kind: ClusterRole
35 | name: otel-collector
36 | subjects:
37 | - kind: ServiceAccount
38 | name: otel-collector
39 | namespace: default
40 |
--------------------------------------------------------------------------------
/collector/istio/otel-collector-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: otel-collector
5 | labels:
6 | app: otel-collector
7 | spec:
8 | selector:
9 | app: otel-collector
10 | ports:
11 | - protocol: TCP
12 | port: 80
13 | targetPort: 55681
14 | type: LoadBalancer
15 |
--------------------------------------------------------------------------------
/collector/k8s-tracing/apiserver-tracing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiserver.config.k8s.io/v1alpha1
2 | kind: TracingConfiguration
3 | endpoint: 192.168.1.253:4317
4 | samplingRatePerMillion: 1000000
5 |
--------------------------------------------------------------------------------
/collector/k8s-tracing/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | opentelemetry-collector-contrib:
4 | image: otel/opentelemetry-collector-contrib:0.59.0
5 | command: ["--config=/etc/otel-collector-config.yml"]
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | ports:
9 | - 4317:4317
10 | volumes:
11 | - ./otel-collector-config.yaml:/etc/otel-collector-config.yml
12 |
--------------------------------------------------------------------------------
/collector/k8s-tracing/otel-collector-config.yaml:
--------------------------------------------------------------------------------
1 |
2 | receivers:
3 | otlp:
4 | protocols:
5 | # default 0.0.0.0:4317
6 | grpc:
7 |
8 | processors:
9 | batch:
10 |
11 | exporters:
12 | logging:
13 | loglevel: debug
14 | # Export to Cloud Observability Public Satellites
15 | otlp/lightstep:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
19 | service:
20 | pipelines:
21 | traces:
22 | receivers: [otlp]
23 | processors: [batch]
24 | exporters: [logging, otlp/lightstep]
25 |
--------------------------------------------------------------------------------
/collector/k8s-tracing/readme.md:
--------------------------------------------------------------------------------
1 |
2 | # Kubernetes kubelet, etcd, and API Server Tracing
3 |
4 | Demo of experimental distributed tracing features in Kubernetes 1.25+.
5 |
6 | ## Instructions
7 |
8 | Start an OpenTelemetry collector to ingest traces:
9 |
10 | ```
11 | $ export LS_ACCESS_TOKEN=
12 | $ docker-compose up
13 | ```
14 |
15 | Update *.yaml and *.toml config files to point to the *external* IP address of the collector started above (localhost won't work).
16 |
17 | Start minikube (> v1.26.1) with experimental tracing feature gates:
18 |
19 | ```
20 | $ ./run-minikube.sh
21 | ```
22 |
--------------------------------------------------------------------------------
/collector/k8s-tracing/run-minikube.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 |
4 | mkdir -p ~/.minikube/files/etc/ssl/certs/
5 | mkdir -p ~/.minikube/files/etc/crio
6 | mkdir -p ~/.minikube/files/etc/containerd
7 | cp apiserver-tracing.yaml ~/.minikube/files/etc/ssl/certs/apiserver-tracing.yaml
8 | cp kubelet-tracing.yaml ~/.minikube/files/etc/ssl/certs/kubelet-tracing.yaml
9 | cp crio.conf ~/.minikube/files/etc/crio/crio.conf
10 | cp containerd.toml ~/.minikube/files/etc/containerd/config.toml
11 |
12 | SPAN_INGEST_ADDR=192.168.1.253:4317
13 |
14 | # requires minikube v1.26.1 or greater
15 |
16 | minikube start --kubernetes-version=v1.25.0-rc.1 \
17 | --feature-gates=APIServerTracing=true \
18 | --extra-config=apiserver.feature-gates=APIServerTracing=true \
19 | --extra-config=apiserver.tracing-config-file=/etc/ssl/certs/apiserver-tracing.yaml \
20 | --extra-config=kubelet.config=/etc/ssl/certs/kubelet-tracing.yaml \
21 | --extra-config=etcd.experimental-enable-distributed-tracing=true \
22 | --extra-config=etcd.experimental-distributed-tracing-address=$SPAN_INGEST_ADDR
--------------------------------------------------------------------------------
/collector/kafka/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | kafkametrics:
3 | brokers:
4 | - "kafka:9092"
5 | protocol_version: 2.0.0
6 | scrapers:
7 | - brokers
8 | - topics
9 | - consumers
10 | collection_interval: 5s
11 |
12 |
13 | processors:
14 | batch:
15 | exporters:
16 | logging:
17 | loglevel: debug
18 | otlp:
19 | endpoint: ingest.lightstep.com:443
20 | headers:
21 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
22 |
23 | service:
24 | pipelines:
25 | metrics:
26 | receivers: [kafkametrics]
27 | processors: [batch]
28 | exporters: [logging, otlp]
29 |
30 |
31 |
--------------------------------------------------------------------------------
/collector/kafka/src/consumer/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Go runtime as a parent image
2 | FROM golang:1.19
3 |
4 | # Set the working directory to the app directory
5 | WORKDIR /app
6 |
7 | # Copy the application files into the container
8 | COPY . .
9 |
10 | # Download the Go module dependencies
11 | RUN go mod init opentelementry-examples-kafka
12 | RUN go mod tidy
13 |
14 | # Build the Go application
15 | RUN go build -o consumer .
16 |
17 | # Start the producer when the container starts
18 | CMD ["./consumer"]
19 |
--------------------------------------------------------------------------------
/collector/kafka/src/consumer/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | )
6 |
7 | func main() {
8 | // Create a cancellable context; defer cancel so the context's
9 | // resources are released when main returns (go vet: lostcancel).
10 | ctx, cancel := context.WithCancel(context.Background())
11 | defer cancel()
12 | StartConsumer(ctx)
13 | }
13 |
--------------------------------------------------------------------------------
/collector/kafka/src/producer/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Go runtime as a parent image
2 | FROM golang:1.19
3 |
4 | # Set the working directory to the app directory
5 | WORKDIR /app
6 |
7 | # Copy the application files into the container
8 | COPY . .
9 |
10 | # Download the Go module dependencies
11 | RUN go mod init opentelementry-examples-kafka
12 | RUN go mod tidy
13 |
14 | # Build the Go application
15 | RUN go build -o producer .
16 |
17 | # Start the producer when the container starts
18 | CMD ["./producer"]
19 |
--------------------------------------------------------------------------------
/collector/kafka/src/producer/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | )
6 |
7 | func main() {
8 | // Create a new context with a cancel function.
9 | ctx, cancel := context.WithCancel(context.Background())
10 |
11 | StartProducer(ctx, cancel)
12 | }
13 |
--------------------------------------------------------------------------------
/collector/kong/Makefile:
--------------------------------------------------------------------------------
1 |
2 | all: setup install-all
3 |
4 | setup: create-cluster add-repositories
5 | install-all: install-contour install-kong install-collector
6 |
7 | add-repositories:
8 | helm repo add bitnami https://charts.bitnami.com/bitnami
9 | helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
10 |
11 | create-cluster:
12 | kind create cluster --config kind-config.yaml
13 |
14 | delete-cluster:
15 | kind delete cluster
16 |
17 | install-contour:
18 | helm upgrade --install my-contour bitnami/contour --namespace projectcontour --create-namespace
19 |
20 | install-kong:
21 | helm upgrade --install my-kong --set metrics.enabled=true bitnami/kong
22 |
23 | install-collector:
24 | ./install_collector.sh
25 |
--------------------------------------------------------------------------------
/collector/kong/install_collector.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | helm install my-collector open-telemetry/opentelemetry-collector -f values-collector.yaml -f - <
9 | $ export LS_ACCESS_TOKEN_2=
10 | $ docker-compose up
11 | ```
--------------------------------------------------------------------------------
/collector/mysql/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | mysql:
3 | username: testuser
4 | password: testpassword
5 | endpoint: mysql:3306
6 | metrics:
7 | mysql.query.slow.count:
8 | enabled: true
9 |
10 | exporters:
11 | logging:
12 | loglevel: debug
13 | # configuring otlp to Cloud Observability
14 | otlp:
15 | endpoint: ingest.lightstep.com:443
16 | headers:
17 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
18 |
19 | processors:
20 | batch:
21 |
22 | service:
23 | pipelines:
24 | metrics:
25 | receivers: [mysql]
26 | processors: [batch]
27 | exporters: [logging, otlp]
28 |
--------------------------------------------------------------------------------
/collector/mysql/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 | services:
3 | mysql:
4 | container_name: mysql
5 | image: mysql:8.0.32
6 | restart: always
7 | environment:
8 | MYSQL_USER: testuser
9 | MYSQL_PASSWORD: testpassword
10 | MYSQL_ROOT_PASSWORD: testpassword
11 | MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
12 | MYSQL_DATABASE: opentelemetry-tests
13 | networks:
14 | - integrations
15 | stop_grace_period: 1s
16 | volumes:
17 | - mysql-data:/var/lib/mysql
18 | - ./scripts/setup.sh:/docker-entrypoint-initdb.d/setup.sh:ro
19 |
20 | otel-collector:
21 | container_name: otel-collector
22 | image: otel/opentelemetry-collector-contrib:0.74.0
23 | command: ["--config=/conf/collector.yml"]
24 | environment:
25 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
26 | networks:
27 | - integrations
28 | volumes:
29 | - ./collector.yml:/conf/collector.yml:rw
30 |
31 | networks:
32 | integrations:
33 |
34 | volumes:
35 | mysql-data:
36 |
--------------------------------------------------------------------------------
/collector/nginx-ingresscontroller/.gitignore:
--------------------------------------------------------------------------------
1 | .*
2 | !/.gitignore
3 |
4 | *nginx-ingress-helm-operator/
5 |
--------------------------------------------------------------------------------
/collector/nginx-ingresscontroller/collector/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | - values.yaml
5 | - secret.yaml
6 | patchesStrategicMerge:
7 | - ./.patch.token.yaml
8 |
--------------------------------------------------------------------------------
/collector/nginx-ingresscontroller/collector/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: lightstep-secret
5 | type:
6 | stringData:
7 | lightstep_access_token:
8 |
--------------------------------------------------------------------------------
/collector/nginx/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | nginx/proxy:
3 | endpoint: 'http://nginx_proxy:8080/status'
4 | collection_interval: 10s
5 | nginx/appsrv:
6 | endpoint: 'http://nginx_appsrv:1080/status'
7 | collection_interval: 10s
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | resource/proxy:
19 | attributes:
20 | - key: instance.type
21 | value: "proxy"
22 | action: insert
23 | resource/appsrv:
24 | attributes:
25 | - key: instance.type
26 | value: "appsrv"
27 | action: insert
28 | batch:
29 |
30 | service:
31 | pipelines:
32 | metrics/proxy:
33 | receivers: [nginx/proxy]
34 | processors: [resource/proxy]
35 | exporters: [logging, otlp/public]
36 | metrics/appsrv:
37 | receivers: [nginx/appsrv]
38 | processors: [resource/appsrv]
39 | exporters: [logging, otlp/public]
40 |
--------------------------------------------------------------------------------
/collector/nginx/nginx-appsrv.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 1080;
3 | server_name nginx_appsrv;
4 |
5 | location / {
6 | root /usr/share/nginx/html;
7 | index index.html index.htm;
8 | }
9 |
10 | error_page 500 502 503 504 /50x.html;
11 | location = /50x.html {
12 | root /usr/share/nginx/html;
13 | }
14 |
15 | # status module required for metrics collection
16 | location /status {
17 | stub_status;
18 | allow all;
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/collector/nginx/nginx-proxy.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 8080;
3 | server_name nginx_proxy;
4 |
5 | # forwards requests
6 | location / {
7 | proxy_pass http://nginx_appsrv:1080;
8 | }
9 |
10 | # status module required for metrics collection
11 | location /status {
12 | stub_status;
13 | allow all;
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/collector/pgbouncer/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | postgresql:
3 | endpoint: pgbouncer:5432
4 | username: dbuser
5 | password: hbZkzny5xrvVH
6 | databases:
7 | - test
8 | collection_interval: 5s
9 | tls:
10 | insecure: true
11 |
12 | processors:
13 | batch:
14 |
15 | exporters:
16 | logging:
17 | loglevel: debug
18 | otlp:
19 | endpoint: ingest.lightstep.com:443
20 | headers:
21 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
22 |
23 | service:
24 | telemetry:
25 | logs:
26 | level: "debug"
27 | pipelines:
28 | metrics:
29 | receivers: [ postgresql ]
30 | processors: [ batch ]
31 | exporters: [ logging,otlp ]
32 |
--------------------------------------------------------------------------------
/collector/php-fpm/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-php-fpm
6 | static_configs:
7 | - targets: [php-fpm-exporter:9253]
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics/php-fpm:
23 | receivers: [prometheus]
24 | processors: [batch]
25 | exporters: [logging, otlp/public]
26 |
--------------------------------------------------------------------------------
/collector/php-fpm/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.60.0
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | configs:
9 | - source: collector_conf
10 | target: /conf/collector.yml
11 | command: ["--config=/conf/collector.yml"]
12 | networks:
13 | - integrations
14 |
15 | php-fpm:
16 | image: php:8.1.11-fpm
17 | configs:
18 | - source: www_conf
19 | target: /usr/local/etc/php-fpm.d/www.conf
20 | networks:
21 | - integrations
22 |
23 | php-fpm-exporter:
24 | image: hipages/php-fpm_exporter:2.2.0
25 | command: ["--phpfpm.scrape-uri", "tcp://php-fpm:9000/status"]
26 | networks:
27 | - integrations
28 |
29 | configs:
30 | collector_conf:
31 | file: ./collector.yml
32 | www_conf:
33 | file: ./www.conf
34 |
35 | networks:
36 | integrations:
37 |
--------------------------------------------------------------------------------
/collector/php-fpm/www.conf:
--------------------------------------------------------------------------------
1 | [www]
2 | user = www-data
3 | group = www-data
4 | listen = 127.0.0.1:9000
5 | pm = dynamic
6 | pm.max_children = 5
7 | pm.start_servers = 2
8 | pm.min_spare_servers = 1
9 | pm.max_spare_servers = 3
10 |
11 | pm.status_path = /status
12 |
--------------------------------------------------------------------------------
/collector/postgres/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | postgresql:
3 | endpoint: postgres:5432
4 | username: postgres
5 | password: postgres
6 | databases:
7 | - postgres
8 | collection_interval: 5s
9 | tls:
10 | insecure: true
11 |
12 | processors:
13 | batch:
14 |
15 | exporters:
16 | logging:
17 | loglevel: debug
18 | otlp:
19 | endpoint: ingest.lightstep.com:443
20 | headers:
21 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
22 |
23 | service:
24 | telemetry:
25 | logs:
26 | level: "debug"
27 | pipelines:
28 | metrics:
29 | receivers: [postgresql]
30 | processors: [batch]
31 | exporters: [logging,otlp]
--------------------------------------------------------------------------------
/collector/powerdns/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-powerdns
6 | static_configs:
7 | - targets: [powerdns:8082]
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics/powerdns:
23 | receivers: [prometheus]
24 | processors: [batch]
25 | exporters: [logging, otlp/public]
26 |
--------------------------------------------------------------------------------
/collector/powerdns/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.61.0
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | configs:
9 | - source: collector_conf
10 | target: /conf/collector.yml
11 | command: ["--config=/conf/collector.yml"]
12 | networks:
13 | - integrations
14 |
15 | powerdns:
16 | image: powerdns/pdns-recursor-47:4.7.3
17 | configs:
18 | - source: powerdns_recursor_conf
19 | target: /etc/powerdns/recursor.conf
20 | ports:
21 | - "8082:8082"
22 | networks:
23 | - integrations
24 |
25 | configs:
26 | collector_conf:
27 | file: ./collector.yml
28 | powerdns_recursor_conf:
29 | file: ./recursor.conf
30 |
31 | networks:
32 | integrations:
33 |
--------------------------------------------------------------------------------
/collector/powerdns/recursor.conf:
--------------------------------------------------------------------------------
1 | webserver=yes
2 | webserver-address=0.0.0.0
3 | webserver-allow-from=0.0.0.0/0
4 |
--------------------------------------------------------------------------------
/collector/prom-native-arangodb/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'arangodb'
6 | scrape_interval: 3s
7 | metrics_path: '/_admin/metrics/v2'
8 | scheme: 'http'
9 | tls_config:
10 | insecure_skip_verify: true
11 | static_configs:
12 | - targets: ['arangodb:8529']
13 |
14 | exporters:
15 | logging:
16 | loglevel: debug
17 | otlp:
18 | endpoint: ingest.lightstep.com:443
19 | headers:
20 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
21 |
22 | processors:
23 | batch:
24 |
25 | service:
26 | pipelines:
27 | metrics:
28 | receivers: [prometheus]
29 | processors: [batch]
30 | exporters: [logging, otlp]
31 |
32 |
--------------------------------------------------------------------------------
/collector/prom-native-arangodb/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | container_name: otel-collector
6 | image: otel/opentelemetry-collector-contrib:0.74.0
7 | command: [ "--config=/conf/collector.yml" ]
8 | environment:
9 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
10 | volumes:
11 | - ./collector.yml:/conf/collector.yml:rw
12 | depends_on:
13 | - arangodb
14 | arangodb:
15 | container_name: arangodb
16 | image: arangodb:${APP_VERSION}
17 | volumes:
18 | - ./arangodb.conf:/etc/arangodb3/arangodb.conf:rw
19 | environment:
20 | - ARANGO_ROOT_PASSWORD=password
21 | ports:
22 | - "8529:8529"
23 |
24 |
--------------------------------------------------------------------------------
/collector/prom-native-ceph/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'ceph-mgr'
6 | scrape_interval: 15s
7 | metrics_path: '/metrics'
8 | static_configs:
9 | - targets: ['mgr1:9283']
10 |
11 | exporters:
12 | logging:
13 | loglevel: debug
14 | otlp:
15 | endpoint: ingest.lightstep.com:443
16 | headers:
17 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
18 |
19 | processors:
20 | batch:
21 |
22 | service:
23 | telemetry:
24 | logs:
25 | level: DEBUG
26 | pipelines:
27 | metrics:
28 | receivers: [prometheus]
29 | processors: [batch]
30 | exporters: [logging, otlp]
--------------------------------------------------------------------------------
/collector/prom-native-clickhouse/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'clickhouse'
6 | scrape_interval: 3s
7 | metrics_path: '/metrics'
8 | scheme: 'http'
9 | tls_config:
10 | insecure_skip_verify: true
11 | static_configs:
12 | - targets: [ 'clickhouse:8001' ]
13 |
14 | exporters:
15 | logging:
16 | loglevel: debug
17 | otlp:
18 | endpoint: ingest.lightstep.com:443
19 | headers:
20 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
21 |
22 |
23 | processors:
24 | batch:
25 |
26 | service:
27 | telemetry:
28 | logs:
29 | level: DEBUG
30 | pipelines:
31 | metrics:
32 | receivers: [ prometheus ]
33 | processors: [ batch ]
34 | exporters: [ logging, otlp ]
35 |
--------------------------------------------------------------------------------
/collector/prom-native-clickhouse/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | otel-collector:
5 | container_name: otel-collector
6 | image: otel/opentelemetry-collector-contrib:0.56.0
7 | command: [ "--config=/conf/collector.yml" ]
8 | ports:
9 | - 8888:8888
10 | environment:
11 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
12 | volumes:
13 | - ./collector.yml:/conf/collector.yml:rw
14 | depends_on:
15 | - clickhouse
16 |
17 | clickhouse:
18 | image: yandex/clickhouse-server
19 | container_name: clickhouse-server
20 | restart: always
21 | ports:
22 | - 8001:8001
23 | - 8123:8123
24 | - 9000:9000
25 | - 9009:9009
26 | healthcheck:
27 | test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
28 | interval: 30s
29 | timeout: 5s
30 | retries: 3
31 | volumes:
32 | - ./config/prom_conf.xml:/etc/clickhouse-server/config.xml
33 | ulimits:
34 | nproc: 65535
35 | nofile:
36 | soft: 262144
37 | hard: 262144
38 |
39 |
--------------------------------------------------------------------------------
/collector/prom-native-cockroachdb/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'roach1'
6 | scrape_interval: 3s
7 | metrics_path: '/_status/vars'
8 | params:
9 | format: ['prometheus']
10 | scheme: 'http'
11 | tls_config:
12 | insecure_skip_verify: true
13 | static_configs:
14 | - targets: ['roach1:8080', 'roach2:8080', 'roach3:8080']
15 |
16 | exporters:
17 | logging:
18 | loglevel: debug
19 | otlp:
20 | endpoint: ingest.lightstep.com:443
21 | headers:
22 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
23 |
24 | processors:
25 | batch:
26 |
27 | service:
28 | pipelines:
29 | metrics:
30 | receivers: [prometheus]
31 | processors: [batch]
32 | exporters: [logging, otlp]
33 |
--------------------------------------------------------------------------------
/collector/prom-native-cockroachdb/config/roach-init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Poll the CockroachDB HTTP health endpoint until it responds.
4 | until health=$(curl -s "http://roach1:8080/health?ready=1"); do
5 |   sleep 0.1
6 | done
7 |
8 | # An uninitialized cluster reports this error body from the health check;
9 | # in that case run one-time cluster initialization against node 1.
10 | uninitialized='"error": "node is waiting for cluster initialization"'
11 | if [[ $health =~ $uninitialized ]]; then
12 |   ./cockroach init --insecure --host=roach1:26257
13 | else
14 |   echo "Cluster is up!"
15 | fi
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/.gitignore:
--------------------------------------------------------------------------------
1 | .gradle
2 | build/
3 | !gradle/wrapper/gradle-wrapper.jar
4 | !**/src/main/**/build/
5 | !**/src/test/**/build/
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 | bin/
16 | !**/src/main/**/bin/
17 | !**/src/test/**/bin/
18 |
19 | ### IntelliJ IDEA ###
20 | .idea
21 | *.iws
22 | *.iml
23 | *.ipr
24 | out/
25 | !**/src/main/**/out/
26 | !**/src/test/**/out/
27 |
28 | ### NetBeans ###
29 | /nbproject/private/
30 | /nbbuild/
31 | /dist/
32 | /nbdist/
33 | /.nb-gradle/
34 |
35 | ### VS Code ###
36 | .vscode/
37 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/java/otel/example/micrometer/Application.java:
--------------------------------------------------------------------------------
1 | package otel.example.micrometer;
2 |
3 | import org.springframework.boot.SpringApplication;
4 | import org.springframework.boot.autoconfigure.SpringBootApplication;
5 | import org.springframework.scheduling.annotation.EnableScheduling;
6 |
7 | /**
8 |  * Spring Boot entry point for the micrometer demo backend.
9 |  *
10 |  * Note: {@code @SpringBootApplication} already implies
11 |  * {@code @EnableAutoConfiguration} and a {@code @ComponentScan} of this
12 |  * package, so the previously explicit duplicate annotations were removed.
13 |  */
14 | @SpringBootApplication
15 | @EnableScheduling
16 | public class Application {
17 |
18 |     public static void main(String[] args) {
19 |         SpringApplication.run(Application.class, args);
20 |     }
21 | }
19 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/java/otel/example/micrometer/controllers/HomeController.java:
--------------------------------------------------------------------------------
1 | package otel.example.micrometer.controllers;
2 |
3 | import otel.example.micrometer.entity.Greeting;
4 | import otel.example.micrometer.repository.GreetingRepository;
5 | import org.springframework.beans.factory.annotation.Autowired;
6 | import org.springframework.stereotype.Controller;
7 | import org.springframework.ui.Model;
8 | import org.springframework.web.bind.annotation.GetMapping;
9 |
10 | /**
11 |  * Serves the home page with the greeting name loaded from the database.
12 |  */
13 | @Controller
14 | public class HomeController {
15 |
16 |     @Autowired
17 |     private GreetingRepository repository;
18 |
19 |     /**
20 |      * Renders the "home" view with the greeting stored under id 1,
21 |      * falling back to a placeholder when the row is missing.
22 |      *
23 |      * @param name  request parameter bound by Spring; currently unused —
24 |      *              the displayed name always comes from the database
25 |      * @param model view model that receives the "name" attribute
26 |      * @return the logical view name "home"
27 |      */
28 |     @GetMapping("/")
29 |     public String showHome(String name, Model model) {
30 |         Greeting dockerGreeting = repository.findById(1).orElse(new Greeting("Not Found 😕"));
31 |         // addAttribute mutates the Model in place (and returns it for
32 |         // chaining); reassigning the parameter was a no-op.
33 |         model.addAttribute("name", dockerGreeting.getName());
34 |         return "home";
35 |     }
36 |
37 | }
24 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/java/otel/example/micrometer/repository/GreetingRepository.java:
--------------------------------------------------------------------------------
1 | package otel.example.micrometer.repository;
2 |
3 | import otel.example.micrometer.entity.Greeting;
4 | import org.springframework.data.repository.CrudRepository;
5 | import org.springframework.stereotype.Repository;
6 |
7 | /**
8 |  * CRUD access to {@link Greeting} rows keyed by their integer id.
9 |  *
10 |  * Parameterized with entity and id types (previously a raw
11 |  * {@code CrudRepository}): Spring Data needs the generic type arguments to
12 |  * resolve the managed entity, and callers such as HomeController expect
13 |  * {@code findById(...)} to yield {@code Optional<Greeting>}.
14 |  */
15 | @Repository
16 | public interface GreetingRepository extends CrudRepository<Greeting, Integer> {
17 | }
10 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | spring.jpa.properties.hibernate.dialect = org.hibernate.dialect.PostgreSQLDialect
2 | spring.jpa.hibernate.ddl-auto=none
3 | spring.jpa.show-sql=true
4 |
5 | spring.datasource.url=jdbc:postgresql://db:5432/${POSTGRES_DB}
6 | spring.datasource.username=postgres
7 | spring.datasource.password=${POSTGRES_PASSWORD:db-wrz2z}
8 | spring.datasource.initialization-mode=always
9 | spring.datasource.initialize=true
10 | spring.datasource.schema=classpath:/schema.sql
11 | spring.datasource.continue-on-error=true
12 |
13 | # Actuator endpoint exposure (duplicate key removed — the last value wins
14 | # in .properties files — and the endpoint id is 'metrics', not 'metric')
15 | management.endpoints.web.exposure.include=prometheus,health,info,metrics
16 |
17 | management.health.probes.enabled=true
18 | management.endpoint.health.show-details=always
19 |
20 |
21 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/resources/data.sql:
--------------------------------------------------------------------------------
1 | -- Seed row; with the serial primary key this is presumably id 1,
2 | -- the row HomeController looks up — TODO confirm no prior inserts.
3 | INSERT INTO GREETINGS(name) values ('Docker');
4 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/resources/schema.sql:
--------------------------------------------------------------------------------
1 | -- Greetings table: auto-incrementing id plus a short display name.
2 | CREATE TABLE IF NOT EXISTS GREETINGS (
3 | id serial PRIMARY KEY,
4 | name varchar(50) NOT NULL
5 | );
6 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/backend/src/main/resources/templates/home.ftlh:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 |     <title>Getting Started: Serving Web Content</title>
5 | </head>
6 | <body>
7 |     <p>Hello from ${name}!</p>
8 | </body>
9 | </html>
10 |
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'micrometer-demo'
6 | scrape_interval: 20s
7 | scrape_timeout: 20s
8 | metrics_path: '/actuator/prometheus'
9 | tls_config:
10 | insecure_skip_verify: true
11 | scheme: http
12 | static_configs:
13 | - targets: ['host.docker.internal:8080']
14 |
15 | exporters:
16 | logging:
17 | loglevel: debug
18 | otlp:
19 | endpoint: ingest.lightstep.com:443
20 | headers:
21 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
22 |
23 |
24 | processors:
25 | batch:
26 |
27 | service:
28 | telemetry:
29 | logs:
30 | level: DEBUG
31 | pipelines:
32 | metrics:
33 | receivers: [prometheus]
34 | processors: [batch]
35 | exporters: [logging, otlp]
--------------------------------------------------------------------------------
/collector/prom-native-micrometer/db/password.txt:
--------------------------------------------------------------------------------
1 | db-wrz2z
--------------------------------------------------------------------------------
/collector/prom-native-nomad/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'nomad-server'
6 | scrape_interval: 10s
7 | metrics_path: '/v1/metrics'
8 | params:
9 | format: ['prometheus']
10 | static_configs:
11 | - targets: ['nomad-server:4646']
12 |
13 | exporters:
14 | logging:
15 | loglevel: debug
16 | otlp:
17 | endpoint: ingest.lightstep.com:443
18 | headers:
19 | "lightstep-access-token": ${LS_ACCESS_TOKEN}
20 |
21 | processors:
22 | batch:
23 |
24 | service:
25 | telemetry:
26 | logs:
27 | level: DEBUG
28 | pipelines:
29 | metrics:
30 | receivers: [prometheus]
31 | processors: [batch]
32 | exporters: [logging, otlp]
--------------------------------------------------------------------------------
/collector/prom-native-nomad/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | nomad-server:
5 | image: djenriquez/nomad
6 | container_name: nomad-server
7 | command: agent -dev
8 | ports:
9 | - 4646:4646
10 | privileged: true
11 | volumes:
12 | - ./nomad/config/local.json:/etc/nomad/local.json:rw
13 | - /var/run/docker.sock:/var/run/docker.sock:rw
14 | - /tmp:/tmp
15 |
16 | otel-collector:
17 | container_name: otel-collector
18 | image: otel/opentelemetry-collector-contrib:0.56.0
19 | command: [ "--config=/conf/collector.yml" ]
20 | ports:
21 | - 8888:8888
22 | environment:
23 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
24 | volumes:
25 | - ./collector.yml:/conf/collector.yml:rw
26 | depends_on:
27 | - nomad-server
--------------------------------------------------------------------------------
/collector/prom-native-nomad/nomad/config/local.json:
--------------------------------------------------------------------------------
1 | {
2 | "server": {
3 | "enabled": true,
4 | "bootstrap_expect": 3
5 | },
6 | "data_dir": "/nomad/data/",
7 | "bind_addr": "0.0.0.0",
8 | "log_level": "INFO",
9 | "enable_debug": true,
10 | "advertise": {
11 | "http": "127.0.0.1:4646",
12 | "rpc": "127.0.0.1:4647",
13 | "serf": "127.0.0.1:4648"
14 | },
15 | "client": {
16 | "enabled": true,
17 | "server_join": {
18 | "retry_join": [
19 | "127.0.0.1:4647"
20 | ]
21 | }
22 | },
23 | "telemetry": {
24 | "publish_allocation_metrics": true,
25 | "publish_node_metrics": true,
26 | "collection_interval": "1s",
27 | "disable_hostname": true,
28 | "prometheus_metrics": true
29 | }
30 | }
--------------------------------------------------------------------------------
/collector/prom-native-scylla/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'scylladb'
6 | scrape_interval: 10s
7 | static_configs:
8 | - targets: ['scylladb:9180']
9 |
10 | processors:
11 | batch:
12 |
13 | exporters:
14 | logging:
15 | loglevel: debug
16 | otlp:
17 | endpoint: ingest.staging.lightstep.com:443
18 |     headers:
19 |       "lightstep-access-token": "${LS_ACCESS_TOKEN}"
20 |
21 | service:
22 | pipelines:
23 | metrics:
24 | receivers: [prometheus]
25 | processors: [batch]
26 | exporters: [logging,otlp]
27 |
--------------------------------------------------------------------------------
/collector/prom-native-scylla/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 | services:
3 | scylladb:
4 | image: scylladb/scylla:latest
5 | ports:
6 | - "9042:9042"
7 |
8 | otel-collector:
9 | container_name: otel-collector
10 | image: otel/opentelemetry-collector-contrib:0.81.0
11 | hostname: otel-collector
12 | restart: always
13 | command: [ "--config=/conf/collector.yaml" ]
14 | volumes:
15 | - ./collector.yaml:/conf/collector.yaml:rw
16 | environment:
17 | LS_ACCESS_TOKEN: "${LS_ACCESS_TOKEN}"
18 |
--------------------------------------------------------------------------------
/collector/prom-native-singlestore/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'node'
6 | scrape_interval: 10s
7 | metrics_path: '/metrics'
8 | params:
9 | format: ['prometheus']
10 | static_configs:
11 | - targets: ['singlestore:9104']
12 | - job_name: 'cluster'
13 | scrape_interval: 10s
14 | metrics_path: '/cluster-metrics'
15 | params:
16 | format: ['prometheus']
17 | static_configs:
18 | - targets: ['singlestore:9104']
19 |
20 | exporters:
21 | logging:
22 | loglevel: debug
23 | otlp:
24 | endpoint: ingest.lightstep.com:443
25 | headers:
26 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
27 |
28 | processors:
29 | batch:
30 |
31 | service:
32 | telemetry:
33 | logs:
34 | level: DEBUG
35 | pipelines:
36 | metrics:
37 | receivers: [prometheus]
38 | processors: [batch]
39 | exporters: [logging, otlp]
--------------------------------------------------------------------------------
/collector/prom-native-singlestore/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 |
3 | services:
4 | otel-collector:
5 | container_name: otel-collector
6 | image: otel/opentelemetry-collector-contrib:0.74.0
7 | command: [ "--config=/conf/collector.yml" ]
8 | ports:
9 | - "8888:8888"
10 | environment:
11 | LS_ACCESS_TOKEN: "${LS_ACCESS_TOKEN}"
12 | volumes:
13 | - ./collector.yml:/conf/collector.yml:ro
14 |
15 | singlestore:
16 | image: memsql/cluster-in-a-box:alma-8.1.2-32927bff38-4.0.11-1.16.0
17 | ports:
18 | - "3306:3306"
19 | - "8080:8080"
20 | - "9104:9104"
21 | volumes:
22 | - singlestore-data:/var/lib/memsql
23 | - ./init.sql:/init.sql
24 | environment:
25 | LICENSE_KEY: "${LICENSE_KEY}"
26 | START_AFTER_INIT: Y
27 | ROOT_PASSWORD: password_here
28 |
29 | volumes:
30 | singlestore-data:
31 |
--------------------------------------------------------------------------------
/collector/prom-native-vault/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: 'vault-server'
6 | scrape_interval: 10s
7 | metrics_path: '/v1/sys/metrics'
8 | params:
9 | format: ['prometheus']
10 | static_configs:
11 | - targets: ['vault-server:8200']
12 |
13 | exporters:
14 | logging:
15 | loglevel: debug
16 | otlp:
17 | endpoint: ingest.lightstep.com:443
18 | headers:
19 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
20 |
21 | processors:
22 | batch:
23 |
24 | service:
25 | telemetry:
26 | logs:
27 | level: DEBUG
28 | pipelines:
29 | metrics:
30 | receivers: [prometheus]
31 | processors: [batch]
32 | exporters: [logging, otlp]
33 |
--------------------------------------------------------------------------------
/collector/prom-native-vault/consul/config/consul-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "datacenter": "localhost",
3 | "data_dir": "/consul/data",
4 | "log_level": "DEBUG",
5 | "server": true,
6 | "ui": true,
7 | "ports": {
8 | "dns": 53
9 | }
10 | }
--------------------------------------------------------------------------------
/collector/prom-native-vault/vault/config/policies/prometheus-metrics.hcl:
--------------------------------------------------------------------------------
1 | path "/sys/metrics" {
2 | capabilities = ["read"]
3 | }
--------------------------------------------------------------------------------
/collector/prom-native-vault/vault/config/server.hcl:
--------------------------------------------------------------------------------
1 | ui = true
2 | api_addr = "http://127.0.0.1:8200"
3 |
4 | storage "consul" {
5 | address = "consul-server:8500"
6 | path = "vault/"
7 | }
8 |
9 | telemetry {
10 | disable_hostname = true
11 | prometheus_retention_time = "12h"
12 | }
--------------------------------------------------------------------------------
/collector/rabbitmq/cmd/rabbitmq-simulator/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/otel-rabbitmq
2 |
3 | go 1.18
4 |
5 | require github.com/rabbitmq/amqp091-go v1.3.4
6 |
--------------------------------------------------------------------------------
/collector/rabbitmq/cmd/rabbitmq-simulator/go.sum:
--------------------------------------------------------------------------------
1 | github.com/rabbitmq/amqp091-go v1.3.4 h1:tXuIslN1nhDqs2t6Jrz3BAoqvt4qIZzxvdbdcxWtHYU=
2 | github.com/rabbitmq/amqp091-go v1.3.4/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
3 |
--------------------------------------------------------------------------------
/collector/rabbitmq/config.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/rabbitmq:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | scrape_configs:
7 | - job_name: 'rabbitmq-scraper'
8 | scrape_interval: 5s
9 | metrics_path: "/metrics/per-object"
10 | static_configs:
11 | - targets: [":15692"]
12 | exporters:
13 |   logging:
14 |     loglevel: debug
15 |   otlp:
16 |     endpoint: ingest.lightstep.com:443
17 |     headers:
18 |       "lightstep-access-token": "${LIGHTSTEP_ACCESS_TOKEN}"
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [prometheus/rabbitmq]
23 | exporters: [logging, otlp]
24 |
--------------------------------------------------------------------------------
/collector/redis/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 |
3 | WORKDIR /app
4 | COPY . .
5 |
6 | RUN go mod init redis-demo
7 | RUN go mod tidy
8 |
9 | RUN go build -o load-generator .
10 |
11 | CMD ["./load-generator"]
12 |
--------------------------------------------------------------------------------
/collector/redis/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | redis:
3 | endpoint: redis:6379
4 |
5 | exporters:
6 | logging:
7 | loglevel: debug
8 | # configuring otlp to Cloud Observability
9 | otlp:
10 | endpoint: ingest.lightstep.com:443
11 | headers:
12 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
13 |
14 | processors:
15 | batch:
16 |
17 | service:
18 | pipelines:
19 | metrics:
20 | receivers: [redis]
21 | processors: [batch]
22 | exporters: [logging, otlp]
23 |
--------------------------------------------------------------------------------
/collector/redis/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 | redis:
4 | container_name: redis
5 | image: redis:latest
6 | healthcheck:
7 | test: ["CMD-SHELL", "redis-cli ping"]
8 | interval: 5s
9 | timeout: 3s
10 | retries: 3
11 | ports:
12 | - 6379:6379
13 | stop_grace_period: 1s
14 | networks:
15 | - integrations
16 | otel-collector:
17 | container_name: otel-collector
18 | image: otel/opentelemetry-collector-contrib:latest
19 | command: ["--config=/conf/collector.yml"]
20 | environment:
21 | - LS_ACCESS_TOKEN
22 | volumes:
23 | - ./collector.yml:/conf/collector.yml:rw
24 | depends_on:
25 | redis:
26 | condition: service_healthy
27 | networks:
28 | - integrations
29 | load-generator:
30 | build: .
31 | depends_on:
32 | redis:
33 | condition: service_healthy
34 | networks:
35 | - integrations
36 | networks:
37 | integrations:
38 |
--------------------------------------------------------------------------------
/collector/snmp/Dockerfile_snmp:
--------------------------------------------------------------------------------
1 | FROM alpine:3.16 as certs
2 | RUN apk --update add ca-certificates
3 |
4 | FROM debian:11
5 |
6 | RUN apt update
7 | RUN apt -y install snmpd
8 |
9 | COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
10 |
11 | RUN mkdir /snmp
12 |
13 | RUN net-snmp-create-v3-user -ro -a MD5 -A password -x DES -X priv_password collector_user
14 |
15 | ENTRYPOINT ["snmpd", "-f"]
16 |
--------------------------------------------------------------------------------
/collector/snmp/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.77.0
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | configs:
9 | - source: collector_conf
10 | target: /collector.yml
11 | command: ["--config=/collector.yml"]
12 | networks:
13 | - integrations
14 |
15 | snmpd:
16 | build:
17 | context: ./
18 | dockerfile: ./Dockerfile_snmp
19 | configs:
20 | - source: snmpd_conf
21 | target: /etc/snmp/snmpd.conf
22 | networks:
23 | - integrations
24 | ports:
25 | - "161:161"
26 |
27 | configs:
28 | collector_conf:
29 | file: ./collector.yml
30 | snmpd_conf:
31 | file: ./snmpd.conf
32 |
33 | networks:
34 | integrations:
35 |
--------------------------------------------------------------------------------
/collector/snmp/metrics.csv:
--------------------------------------------------------------------------------
1 | Name,Description,Unit,DataType,Attributes
2 | snmp_cpu_system,,By,Gauge,
3 | snmp_cpu_user,,By,Gauge,
4 | snmp_cpu_idle,,By,Gauge,
5 | snmp_memory_total_swap,,By,Gauge,
6 |
--------------------------------------------------------------------------------
/collector/solr/.gitignore:
--------------------------------------------------------------------------------
1 | certificate.pem
2 | key.pem
--------------------------------------------------------------------------------
/collector/solr/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM curlimages/curl:7.82.0 as curler
2 | ARG JMX_JAR_VERSION=v1.14.0
3 | USER root
4 |
5 | RUN curl -L \
6 | --output /opentelemetry-jmx-metrics.jar \
7 | "https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/${JMX_JAR_VERSION}/opentelemetry-jmx-metrics.jar"
8 |
9 | RUN curl -L -s \
10 | "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.81.0/otelcol-contrib_0.81.0_linux_amd64.tar.gz" | \
11 | tar -xvz -C /
12 |
13 | FROM ibmjava:8-jre
14 | WORKDIR /
15 |
16 | COPY certificate.pem /usr/local/share/ca-certificates/certificate.crt
17 |
18 | RUN cat /usr/local/share/ca-certificates/certificate.crt >> /etc/ssl/certs/ca-certificates.crt
19 |
20 | COPY --from=curler /opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar
21 | COPY --from=curler /otelcol-contrib /otelcol-contrib
22 |
23 | ENTRYPOINT [ "/otelcol-contrib" ]
24 | CMD ["--config", "/etc/otel/config.yaml"]
--------------------------------------------------------------------------------
/collector/solr/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | jmx/solr:
3 | jar_path: /opt/opentelemetry-jmx-metrics.jar
4 | endpoint: solr:18983
5 | target_system: jvm,solr
6 | collection_interval: 10s
7 |
8 | exporters:
9 | logging:
10 | loglevel: debug
11 |
12 | # configuring otlp to Cloud Observability
13 | otlp:
14 | endpoint: ingest.lightstep.com:443
15 | headers:
16 |       "lightstep-access-token": "${LS_ACCESS_TOKEN}"
17 |
18 | processors:
19 | batch:
20 |
21 | service:
22 | pipelines:
23 | metrics:
24 | receivers: [jmx/solr]
25 | processors: [batch]
26 | exporters: [logging, otlp]
27 |
--------------------------------------------------------------------------------
/collector/solr/nodeapp/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | certificate.pem
--------------------------------------------------------------------------------
/collector/solr/nodeapp/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "solr-node-app",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "author": "pentacode",
10 | "license": "ISC",
11 | "dependencies": {
12 | "solr-node": "1.0.13"
13 | }
14 | }
--------------------------------------------------------------------------------
/collector/squid/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-squid
6 | static_configs:
7 | - targets: [squid-exporter:9301]
8 |
9 | exporters:
10 | logging:
11 | loglevel: debug
12 | otlp/public:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics/squid:
23 | receivers: [prometheus]
24 | processors: [batch]
25 | exporters: [logging, otlp/public]
26 |
--------------------------------------------------------------------------------
/collector/squid/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.60.0
6 | environment:
7 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
8 | configs:
9 | - source: collector_conf
10 | target: /conf/collector.yml
11 | command: ["--config=/conf/collector.yml"]
12 | networks:
13 | - integrations
14 |
15 | squid:
16 | image: ubuntu/squid:5.2-22.04_edge
17 | configs:
18 | - source: squid_conf
19 | target: /etc/squid/squid.conf
20 | ports:
21 | - "3128:3128"
22 | networks:
23 | - integrations
24 |
25 | squid-exporter:
26 | image: boynux/squid-exporter:v1.10.3
27 | ports:
28 | - "9301:9301"
29 | command: ["-squid-hostname", "squid", "-squid-port", "3128", "-listen", ":9301"]
30 | networks:
31 | - integrations
32 |
33 | configs:
34 | collector_conf:
35 | file: ./collector.yml
36 | squid_conf:
37 | file: ./squid.conf
38 |
39 | networks:
40 | integrations:
41 |
--------------------------------------------------------------------------------
/collector/squid/squid.conf:
--------------------------------------------------------------------------------
1 |
2 | acl localnet src 10.0.0.0/8
3 | acl localnet src 172.16.0.0/12
4 | acl localnet src 192.168.0.0/16
5 |
6 | acl SSL_ports port 443
7 | acl Safe_ports port 80
8 | acl Safe_ports port 443
9 | acl CONNECT method CONNECT
10 |
11 | cache deny all
12 |
13 | # this setting allows access to the squid manager, which is required to get metrics
14 | http_access allow localnet manager
15 | http_access deny manager
16 |
17 | http_access deny !Safe_ports
18 |
19 | http_access deny CONNECT !SSL_ports
20 |
21 | http_access allow localnet
22 | http_access allow localhost
23 |
24 | http_access deny all
25 |
26 | http_port 3128
27 |
--------------------------------------------------------------------------------
/collector/statsd/README.md:
--------------------------------------------------------------------------------
1 | The OTEL Collector acts as a complete drop-in replacement for StatsD. Both StatsD and the Collector listen on a UDP port.
2 |
3 | Run `docker-compose up`
4 | Run `generator.sh`
--------------------------------------------------------------------------------
/collector/statsd/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | statsd:
3 | statsd/2:
4 | endpoint: "otel-collector:8125"
5 | aggregation_interval: 70s
6 | enable_metric_type: true
7 | is_monotonic_counter: false
8 | timer_histogram_mapping:
9 | - statsd_type: "histogram"
10 | observer_type: "summary"
11 |       - { statsd_type: "timing", observer_type: "summary" }
12 | exporters:
13 | logging:
14 |     loglevel: debug
15 | otlp:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 |       lightstep-access-token: "${LS_ACCESS_TOKEN}"
19 | service:
20 | pipelines:
21 | metrics:
22 | receivers: [statsd, statsd/2]
23 | exporters: [logging, otlp]
24 |
--------------------------------------------------------------------------------
/collector/statsd/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | otel-collector:
5 | image: otel/opentelemetry-collector-contrib:0.56.0
6 | hostname: otel-collector
7 | restart: always
8 | command: ["--config=/conf/collector.yaml"]
9 | ports:
10 | - 8125:8125/udp
11 | volumes:
12 | - ./collector.yaml:/conf/collector.yaml:rw
13 | environment:
14 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
15 |
--------------------------------------------------------------------------------
/collector/statsd/generator.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 |
3 | -x
4 |
5 | for (( ; ; ))
6 | do
7 | echo "sending metric"
8 | GAUGE_EC01=$(( ( RANDOM % 50 ) + 1 ))
9 | GAUGE_EC02=$(( ( RANDOM % 50 ) + 1 ))
10 | echo -n "collectd.ec01.nginx.nginx_connections-active:${GAUGE_EC01}|g" | nc -u -w0 localhost 8125
11 | echo -n "collectd.ec01.nginx.nginx_connections-active:${GAUGE_EC01}|g" | nc -u -w0 localhost 8125
12 | echo -n "collectd.ec02.nginx.nginx_connections-active:${GAUGE_EC02}|g" | nc -u -w0 localhost 8125
13 | echo "Send:"
14 | echo " [*] ec01: ${GAUGE_EC01}"
15 | echo " [*] ec02: ${GAUGE_EC02}"
16 | done
17 |
--------------------------------------------------------------------------------
/collector/tomcat/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM curlimages/curl:7.82.0 as curler
2 | ARG JMX_JAR_VERSION=v1.14.0
3 | USER root
4 | RUN curl -L \
5 | --output /opentelemetry-jmx-metrics.jar \
6 | "https://github.com/open-telemetry/opentelemetry-java-contrib/releases/download/${JMX_JAR_VERSION}/opentelemetry-jmx-metrics.jar"
7 |
8 | RUN curl -L -s \
9 | "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.53.0/otelcol-contrib_0.53.0_linux_amd64.tar.gz" | \
10 | tar -xvz -C /
11 |
12 | FROM ibmjava:8-jre
13 | WORKDIR /
14 |
15 | COPY --from=curler /opentelemetry-jmx-metrics.jar /opt/opentelemetry-jmx-metrics.jar
16 | COPY --from=curler /otelcol-contrib /otelcol-contrib
17 |
18 | ENTRYPOINT [ "/otelcol-contrib" ]
19 | CMD ["--config", "/etc/otel/config.yaml"]
--------------------------------------------------------------------------------
/collector/tomcat/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | jmx/tomcat:
3 | jar_path: /opt/opentelemetry-jmx-metrics.jar
4 | endpoint: tomcat:9090
5 | target_system: jvm,tomcat
6 | collection_interval: 1s
7 |
8 | exporters:
9 | logging:
10 | loglevel: debug
11 | # configuring otlp to Cloud Observability
12 | otlp:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
16 |
17 | processors:
18 | batch:
19 |
20 | service:
21 | pipelines:
22 | metrics:
23 | receivers: [jmx/tomcat]
24 | processors: [batch]
25 | exporters: [logging, otlp]
26 |
--------------------------------------------------------------------------------
/collector/vanilla/collector.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | otlp:
3 | protocols:
4 | grpc:
5 | http:
6 |
7 | processors:
8 | batch:
9 |
10 | exporters:
11 | logging:
12 | otlp/ls:
13 | endpoint: ingest.lightstep.com:443
14 | headers:
15 | "lightstep-access-token": "${LIGHTSTEP_ACCESS_TOKEN}"
16 |
17 | service:
18 | pipelines:
19 | traces:
20 | receivers: [otlp]
21 | processors: [batch]
22 | exporters: [logging, otlp/ls]
23 | metrics:
24 | receivers: [otlp]
25 | processors: [batch]
26 | exporters: [logging,otlp/ls]
--------------------------------------------------------------------------------
/collector/vanilla/readme.md:
--------------------------------------------------------------------------------
1 | # How to run the Collector locally
2 |
3 | 1. Edit the [Collector config YAML](collector.yaml)
4 |
5 | Replace `${LIGHTSTEP_ACCESS_TOKEN}` with your own [Cloud Observability Access Token](https://docs.lightstep.com/docs/create-and-manage-access-tokens)
6 |
7 | 2. Run the Collector's Docker container instance
8 |
9 | Ensure that you are in the repo root folder (`opentelemetry-examples`), then run:
10 |
11 | ```bash
12 | cd collector/vanilla
13 | docker run -it --rm -p 4317:4317 -p 4318:4318 \
14 | -v $(pwd)/collector.yaml:/otel-config.yaml \
15 | --name otelcol otel/opentelemetry-collector-contrib:0.53.0 \
16 | "/otelcol-contrib" \
17 | "--config=otel-config.yaml"
18 | ```
19 |
--------------------------------------------------------------------------------
/collector/varnish/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.19 as builder
2 |
3 | RUN apt update
4 | RUN apt -y install git
5 | RUN git clone https://github.com/jonnenauha/prometheus_varnish_exporter.git
6 | WORKDIR /go/prometheus_varnish_exporter
7 | RUN git checkout 1.6.1
8 | RUN go build
9 |
10 |
11 | FROM varnish:7.1
12 |
13 | COPY --from=builder /go/prometheus_varnish_exporter/prometheus_varnish_exporter /usr/local/bin
14 |
--------------------------------------------------------------------------------
/collector/varnish/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-varnish
6 | static_configs:
7 | - targets: [varnish:9131]
8 | nginx/appsrv:
9 | endpoint: 'http://nginx_appsrv:1080/status'
10 | collection_interval: 10s
11 |
12 | exporters:
13 | logging:
14 | loglevel: debug
15 | otlp/public:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
19 |
20 | processors:
21 | batch:
22 |
23 | service:
24 | pipelines:
25 | metrics/varnish:
26 | receivers: [prometheus]
27 | processors: [batch]
28 | exporters: [logging, otlp/public]
29 | metrics/appsrv:
30 | receivers: [nginx/appsrv]
31 | processors: [batch]
32 | exporters: [logging, otlp/public]
33 |
--------------------------------------------------------------------------------
/collector/varnish/docker-varnish-entrypoint:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | varnishd -f /etc/varnish/default.vcl -a http=:80,HTTP -a proxy=:8443,PROXY -p feature=+http2 -s malloc,$VARNISH_SIZE
4 | /usr/local/bin/prometheus_varnish_exporter
5 |
--------------------------------------------------------------------------------
/collector/varnish/nginx-appsrv.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 1080;
3 | server_name nginx_appsrv;
4 |
5 | location / {
6 | root /usr/share/nginx/html;
7 | index index.html index.htm;
8 | }
9 |
10 | error_page 500 502 503 504 /50x.html;
11 | location = /50x.html {
12 | root /usr/share/nginx/html;
13 | }
14 |
15 | # status module required for metrics collection
16 | location /status {
17 | stub_status;
18 | allow all;
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/collector/varnish/varnish.vcl:
--------------------------------------------------------------------------------
1 | vcl 4.1;
2 |
3 | backend default {
4 | .host = "nginx_appsrv";
5 | .port = "1080";
6 | }
7 |
--------------------------------------------------------------------------------
/collector/zookeeper/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/zookeeper:
3 | config:
4 | scrape_configs:
5 | - job_name: otel-zookeeper-eg
6 | scrape_interval: 5s
7 | static_configs:
8 | - targets: ["zookeeper:7000"]
9 |
10 | exporters:
11 | logging:
12 | loglevel: debug
13 | otlp/public:
14 | endpoint: ingest.lightstep.com:443
15 | headers:
16 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
17 |
18 | processors:
19 | batch:
20 |
21 | service:
22 | pipelines:
23 | metrics:
24 | receivers: [prometheus/zookeeper]
25 | processors: [batch]
26 | exporters: [otlp/public, logging]
27 |
--------------------------------------------------------------------------------
/collector/zookeeper/docker-compose.override.yml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 | services:
3 | otel-collector:
4 | image: otel/opentelemetry-collector-contrib:${OTEL_COLLECTOR_VERSION}
5 | environment:
6 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
7 | configs:
8 | - source: collector_conf
9 | target: /conf/collector.yml
10 | depends_on:
11 | zookeeper:
12 | condition: service_healthy
13 | volumes:
14 | - ./statsout:/statsout
15 | command: ["--config=/conf/collector.yml"]
16 | networks:
17 | - integrations
18 |
19 | configs:
20 | collector_conf:
21 | file: ./collector.yml
22 |
23 | # This provides output for the file receiver
24 | volumes:
25 | statsout:
26 | driver: local
27 |
--------------------------------------------------------------------------------
/collector/zookeeper/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | zookeeper:
5 | image: zookeeper
6 | ports:
7 | - ${ZOO_PORT}:2181
8 | - 7000:7000
9 | environment:
10 | - ALLOW_ANONYMOUS_LOGIN=${ALLOW_ANONYMOUS_LOGIN}
11 | networks:
12 | - integrations
13 | configs:
14 | - source: zoo_conf
15 | target: /conf/zoo.cfg
16 | healthcheck:
17 | test: echo stat | nc -z zookeeper 2181 || exit -1
18 | interval: 10s
19 | timeout: 20s
20 | retries: 10
21 | start_period: 30s
22 |
23 | configs:
24 | zoo_conf:
25 | file: ./zoo.cfg
26 |
27 | networks:
28 | integrations:
29 |
--------------------------------------------------------------------------------
/collector/zookeeper/zoo.cfg:
--------------------------------------------------------------------------------
1 | # https://zookeeper.apache.org/doc/r3.4.13/zookeeperStarted.html
2 | # required non-standard config for OTEL Collector config
3 | #4lw.commands.whitelist=stat,mntr,srvr
4 | # below config comes from default in Docker image
5 | dataDir=/data
6 | dataLogDir=/datalog
7 | tickTime=2000
8 | initLimit=5
9 | syncLimit=2
10 | autopurge.snapRetainCount=3
11 | autopurge.purgeInterval=0
12 | maxClientCnxns=60
13 | standaloneEnabled=true
14 | admin.enableServer=true
15 | server.1=localhost:2888:3888;2181
16 | metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
17 |
--------------------------------------------------------------------------------
/config/example-aws-collector-config.yaml:
--------------------------------------------------------------------------------
1 | # Example config for the AWS OpenTelemetry Distro
2 | # This sends data to Cloud Observability, AWS EMF, and X-Ray
3 | # https://github.com/aws-observability/aws-otel-collector
4 |
5 | receivers:
6 | otlp:
7 | protocols:
8 | grpc:
9 | endpoint: 0.0.0.0:55680
10 |
11 | processors:
12 | batch/traces:
13 | timeout: 1s
14 | send_batch_size: 50
15 | batch/metrics:
16 | timeout: 60s
17 |
18 | exporters:
19 | awsxray:
20 | awsemf:
21 | logging:
22 | loglevel: debug
23 | otlp:
24 | endpoint: ingest.lightstep.com:443
25 | headers:
26 | "lightstep-access-token": ""
27 |
28 | service:
29 | pipelines:
30 | traces:
31 | receivers: [otlp]
32 | processors: [batch/traces]
33 | exporters: [awsxray, otlp, logging]
34 | metrics:
35 | receivers: [otlp]
36 | processors: [batch/metrics]
37 | exporters: [awsemf, otlp]
38 |
--------------------------------------------------------------------------------
/demo-client/otlp/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 | RUN pip install opentelemetry-launcher requests pyyaml protobuf==3.20.1
3 | RUN opentelemetry-bootstrap -a install
4 |
5 | ADD client.py /app/client.py
6 | CMD ["opentelemetry-instrument", "/app/client.py"]
7 |
--------------------------------------------------------------------------------
/go/opentelemetry/collector/client/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 | WORKDIR /app
3 | COPY go.mod go.sum ./
4 | RUN go mod download
5 | COPY client.go .
6 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o client .
7 |
8 | FROM alpine:latest
9 | RUN apk --no-cache add ca-certificates
10 | WORKDIR /root/
11 | COPY --from=0 /app/client .
12 | CMD ["./client"]
--------------------------------------------------------------------------------
/go/opentelemetry/collector/server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 | WORKDIR /app
3 | COPY go.mod go.sum ./
4 | RUN go mod download
5 | COPY server.go .
6 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o server .
7 |
8 | FROM alpine:latest
9 | RUN apk --no-cache add ca-certificates
10 | WORKDIR /root/
11 | COPY --from=0 /app/server .
12 | CMD ["./server"]
--------------------------------------------------------------------------------
/go/opentelemetry/otlp/client/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 | WORKDIR /app
3 | COPY go.mod go.sum ./
4 | RUN go mod download
5 | COPY client.go .
6 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o client .
7 |
8 | FROM alpine:latest
9 | RUN apk --no-cache add ca-certificates
10 | WORKDIR /root/
11 | COPY --from=0 /app/client .
12 | CMD ["./client"]
--------------------------------------------------------------------------------
/go/opentelemetry/otlp/env_vars.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export OTEL_EXPORTER_OTLP_ENDPOINT="ingest.lightstep.com:443"
4 | export OTEL_EXPORTER_OTLP_HEADERS="lightstep-access-token="
5 |
--------------------------------------------------------------------------------
/go/opentelemetry/otlp/server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 | WORKDIR /app
3 | COPY go.mod go.sum ./
4 | RUN go mod download
5 | COPY server.go .
6 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o server .
7 |
8 | FROM alpine:latest
9 | RUN apk --no-cache add ca-certificates
10 | WORKDIR /root/
11 | COPY --from=0 /app/server .
12 | CMD ["./server"]
--------------------------------------------------------------------------------
/go/opentracing/client/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 | WORKDIR /app
3 | COPY go.mod go.sum ./
4 | RUN go mod download
5 | COPY client.go .
6 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o client .
7 |
8 | FROM alpine:latest
9 | RUN apk --no-cache add ca-certificates
10 | WORKDIR /root/
11 | COPY --from=0 /app/client .
12 | CMD ["./client"]
--------------------------------------------------------------------------------
/go/opentracing/client/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/lightstep/ls-examples/go/opentracing/client
2 |
3 | go 1.17
4 |
5 | require (
6 | github.com/lightstep/lightstep-tracer-go v0.26.0
7 | github.com/opentracing/opentracing-go v1.2.0
8 | )
9 |
10 | require (
11 | github.com/gogo/protobuf v1.3.2 // indirect
12 | github.com/golang/protobuf v1.3.1 // indirect
13 | github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20210210170715-a8dfcb80d3a7 // indirect
14 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect
15 | golang.org/x/sys v0.0.0-20210217105451-b926d437f341 // indirect
16 | golang.org/x/text v0.3.3 // indirect
17 | google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 // indirect
18 | google.golang.org/grpc v1.21.0 // indirect
19 | )
20 |
--------------------------------------------------------------------------------
/go/opentracing/server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 | WORKDIR /app
3 | COPY go.mod go.sum ./
4 | RUN go mod download
5 | COPY server.go .
6 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o server .
7 |
8 | FROM alpine:latest
9 | RUN apk --no-cache add ca-certificates
10 | WORKDIR /root/
11 | COPY --from=0 /app/server .
12 | CMD ["./server"]
--------------------------------------------------------------------------------
/go/opentracing/server/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/lightstep/ls-examples/go/opentracing/server
2 |
3 | go 1.17
4 |
5 | require (
6 | github.com/lightstep/lightstep-tracer-go v0.26.0
7 | github.com/opentracing-contrib/go-stdlib v1.0.0
8 | github.com/opentracing/opentracing-go v1.2.0
9 | )
10 |
11 | require (
12 | github.com/gogo/protobuf v1.3.2 // indirect
13 | github.com/golang/protobuf v1.3.1 // indirect
14 | github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20210210170715-a8dfcb80d3a7 // indirect
15 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect
16 | golang.org/x/sys v0.0.0-20210217105451-b926d437f341 // indirect
17 | golang.org/x/text v0.3.3 // indirect
18 | google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 // indirect
19 | google.golang.org/grpc v1.21.0 // indirect
20 | )
21 |
--------------------------------------------------------------------------------
/java/Makefile:
--------------------------------------------------------------------------------
1 |
2 | build:
3 | $(MAKE) -C server
4 | $(MAKE) -C client
5 |
6 | clean:
7 | $(MAKE) clean -C server
8 | $(MAKE) clean -C client
9 |
--------------------------------------------------------------------------------
/java/client/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | RUN curl -o opentracing-specialagent-1.7.0.jar https://repo1.maven.org/maven2/io/opentracing/contrib/specialagent/opentracing-specialagent/1.7.0/opentracing-specialagent-1.7.0.jar
9 | COPY src ./src
10 | COPY pom.xml pom.xml
11 | RUN mvn -f /usr/src/app/pom.xml clean package
12 |
13 | FROM ibmjava:8-jre
14 |
15 | COPY --from=build /usr/src/app/opentracing-specialagent-1.7.0.jar /app/
16 | COPY --from=build /usr/src/app/target/client-1.0-SNAPSHOT.jar /app/
17 |
18 | ENTRYPOINT java -javaagent:/app/opentracing-specialagent-1.7.0.jar \
19 | -Dsa.tracer=lightstep \
20 | -Dls.componentName=$LS_SERVICE_NAME \
21 | -Dls.accessToken=$LS_ACCESS_TOKEN \
22 | -Dls.collectorHost=$LS_COLLECTOR_HOST \
23 | -Dls.metricsUrl=$LS_METRICS_URL \
24 | -Dls.propagator=b3 \
25 | -cp /app/client-1.0-SNAPSHOT.jar \
26 | com.lightstep.examples.client.App
27 |
--------------------------------------------------------------------------------
/java/client/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: build clean
2 |
3 | specialagent_url := https://repo1.maven.org/maven2/io/opentracing/contrib/specialagent/opentracing-specialagent/1.7.0/opentracing-specialagent-1.7.0.jar
4 |
5 | otel_auto_url := https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
6 |
7 | build: opentracing-specialagent-1.7.0.jar
8 | mvn package
9 |
10 | opentracing-specialagent-1.7.0.jar:
11 | wget ${specialagent_url}
12 |
13 | build-otel: otel-latest
14 | mvn package
15 |
16 | otel-latest:
17 | wget ${otel_auto_url}
18 |
19 | clean:
20 | mvn clean
21 |
--------------------------------------------------------------------------------
/java/microdonuts/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | RUN curl -o opentelemetry-javaagent.jar https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
9 | COPY src ./src
10 | COPY pom.xml pom.xml
11 | COPY client ./client
12 | RUN mvn -f /usr/src/app/pom.xml clean package
13 |
14 | ENTRYPOINT mvn package exec:exec
15 |
--------------------------------------------------------------------------------
/java/microdonuts/README.md:
--------------------------------------------------------------------------------
1 | # MicroDonuts: An OpenTracing + OpenTelemetry Shim with Lightstep Launcher
2 |
3 | Welcome to MicroDonuts! This is a sample application instrumented with OpenTracing.
4 | It uses OpenTelemetry Shim with Lightstep Launcher.
5 |
6 | ## Step 0: Setup MicroDonuts
7 |
8 | ### Getting it
9 | Build the jar file (for this, Maven must be installed):
10 |
11 | ```
12 | mvn package
13 | ```
14 |
15 | ### Running
16 |
17 | MicroDonuts has two server components, `API` and `Kitchen`, which
18 | communicate each other over HTTP - they are, however, part of
19 | the same process:
20 |
21 | ```
22 | mvn package exec:exec
23 | ```
24 |
25 | #### Accessing
26 |
27 | In your web browser, navigate to http://127.0.0.1:10001 and order yourself some
28 | µ-donuts.
29 |
30 |
31 | #### Cloud Observability Configuration
32 |
33 | If you have access to [Cloud Observability](https://app.lightstep.com), you will need your access token.
34 |
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donut-choc.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donut-choc.jpg
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donut-cinn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donut-cinn.jpg
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donut-glazed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donut-glazed.png
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donut-jelly.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donut-jelly.jpg
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donut-old-fash.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donut-old-fash.jpg
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donut-sprinkles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donut-sprinkles.png
--------------------------------------------------------------------------------
/java/microdonuts/client/img/donuts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lightstep/opentelemetry-examples/f074eff0fb916a708a524e36f1ba0ecd3ea774c3/java/microdonuts/client/img/donuts.png
--------------------------------------------------------------------------------
/java/microdonuts/src/main/java/com/otsample/api/resources/Donut.java:
--------------------------------------------------------------------------------
1 | package com.otsample.api.resources;
2 |
3 | import com.google.gson.annotations.SerializedName;
4 |
5 | public final class Donut
6 | {
7 | @SerializedName("order_id")
8 | String orderId;
9 | Status status;
10 |
11 | public Donut()
12 | {
13 | }
14 |
15 | public Donut(String orderId)
16 | {
17 | this.orderId = orderId;
18 | status = Status.NEW_ORDER;
19 | }
20 |
21 | public Donut clone()
22 | {
23 | Donut copy = new Donut();
24 | copy.orderId = orderId;
25 | copy.status = status;
26 |
27 | return copy;
28 | }
29 |
30 | public String getOrderId() { return orderId; }
31 | public void setOrderId(String value) { orderId = value; }
32 |
33 | public Status getStatus() { return status; }
34 | public void setStatus(Status value) { status = value; }
35 |
36 | }
37 |
38 |
--------------------------------------------------------------------------------
/java/microdonuts/src/main/java/com/otsample/api/resources/DonutAddRequest.java:
--------------------------------------------------------------------------------
1 | package com.otsample.api.resources;
2 |
3 | import com.google.gson.annotations.SerializedName;
4 |
5 | public class DonutAddRequest
6 | {
7 | @SerializedName("order_id")
8 | String orderId;
9 |
10 | public DonutAddRequest(String orderId)
11 | {
12 | this.orderId = orderId;
13 | }
14 |
15 | public String getOrderId() { return orderId; }
16 | public void setOrderId(String value) { orderId = value; }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/java/microdonuts/src/main/java/com/otsample/api/resources/DonutRequest.java:
--------------------------------------------------------------------------------
1 | package com.otsample.api.resources;
2 |
3 | public final class DonutRequest
4 | {
5 | String flavor;
6 | int quantity;
7 |
8 | public DonutRequest()
9 | {
10 | }
11 |
12 | public DonutRequest(String flavor, int quantity)
13 | {
14 | this.flavor = flavor;
15 | this.quantity = quantity;
16 | }
17 |
18 | public String getFlavor() { return flavor; }
19 | public void setFlavor(String value) { flavor = value; }
20 |
21 | public int getQuantity() { return quantity; }
22 | public void setQuantity(int value) { quantity = value; }
23 | }
24 |
25 |
--------------------------------------------------------------------------------
/java/microdonuts/src/main/java/com/otsample/api/resources/Status.java:
--------------------------------------------------------------------------------
1 | package com.otsample.api.resources;
2 |
3 | import com.google.gson.annotations.SerializedName;
4 |
5 | public enum Status
6 | {
7 | @SerializedName("order")
8 | NEW_ORDER,
9 | @SerializedName("received")
10 | RECEIVED,
11 | @SerializedName("cooking")
12 | COOKING,
13 | @SerializedName("ready")
14 | READY
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/java/microdonuts/src/main/java/com/otsample/api/resources/StatusReq.java:
--------------------------------------------------------------------------------
1 | package com.otsample.api.resources;
2 |
3 | import com.google.gson.annotations.SerializedName;
4 |
5 | public final class StatusReq
6 | {
7 | @SerializedName("order_id")
8 | String orderId;
9 |
10 | public String getOrderId() { return orderId; }
11 | public void setOrderId(String value) { orderId = value; }
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/java/otlp/Dockerfile.client:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | RUN curl -o opentelemetry-javaagent.jar https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
9 | COPY src ./src
10 | COPY pom-client.xml pom.xml
11 | RUN mvn -f /usr/src/app/pom.xml clean package
12 |
13 | FROM ibmjava:8-jre
14 |
15 | COPY --from=build /usr/src/app/opentelemetry-javaagent.jar /app/
16 | COPY --from=build /usr/src/app/target/lightstep-otlp-client.jar /app/
17 |
18 | ENTRYPOINT java -jar /app/lightstep-otlp-client.jar \
19 | com.lightstep.otlp.client.Client
20 |
--------------------------------------------------------------------------------
/java/otlp/Dockerfile.server:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | RUN curl -o opentelemetry-javaagent.jar https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
9 | COPY src ./src
10 | COPY pom-server.xml pom.xml
11 | RUN mvn -f /usr/src/app/pom.xml clean package
12 |
13 | FROM ibmjava:8-jre
14 |
15 | COPY --from=build /usr/src/app/opentelemetry-javaagent.jar /app/
16 | COPY --from=build /usr/src/app/target/lightstep-otlp-server.jar /app/
17 |
18 | ENTRYPOINT java \
19 | -jar /app/lightstep-otlp-server.jar\
20 | com.lightstep.otlp.server.ExampleServer
21 |
--------------------------------------------------------------------------------
/java/otlp/Makefile:
--------------------------------------------------------------------------------
1 | default: build
2 |
3 | build-client:
4 | mvn -f pom-client.xml package
5 |
6 | build-server:
7 | mvn -f pom-server.xml package
8 |
9 | build: build-client build-server
10 |
11 | run-client:
12 | java -jar target/lightstep-otlp-client.jar
13 |
14 | run-server:
15 | java -jar target/lightstep-otlp-server.jar
16 |
17 | clean:
18 | mvn -f pom-client.xml clean
--------------------------------------------------------------------------------
/java/ottrace/Dockerfile.client:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | COPY src ./src
9 | COPY pom-client.xml pom.xml
10 | RUN mvn -f /usr/src/app/pom.xml clean package
11 |
12 | FROM ibmjava:8-jre
13 |
14 | COPY --from=build /usr/src/app/target/lightstep-ottrace-client.jar /app/
15 |
16 | ENTRYPOINT java -jar /app/lightstep-ottrace-client.jar \
17 | com.lightstep.ottrace.client.Client
18 |
--------------------------------------------------------------------------------
/java/ottrace/Dockerfile.server:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | COPY src ./src
9 | COPY pom-server.xml pom.xml
10 | RUN mvn -f /usr/src/app/pom.xml clean package
11 |
12 | FROM ibmjava:8-jre
13 |
14 | COPY --from=build /usr/src/app/target/lightstep-ottrace-server.jar /app/
15 |
16 | ENTRYPOINT java \
17 | -jar /app/lightstep-ottrace-server.jar\
18 | com.lightstep.ottrace.server.ExampleServer
19 |
--------------------------------------------------------------------------------
/java/ottrace/Makefile:
--------------------------------------------------------------------------------
1 | default: build
2 |
3 | build-client:
4 | mvn -f pom-client.xml package
5 |
6 | build-server:
7 | mvn -f pom-server.xml package
8 |
9 | build: build-client build-server
10 |
11 | run-client:
12 | java -jar target/lightstep-ottrace-client.jar
13 |
14 | run-server:
15 | java -jar target/lightstep-ottrace-server.jar
16 |
17 | clean:
18 | mvn -f pom-client.xml clean
--------------------------------------------------------------------------------
/java/server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM maven:3-eclipse-temurin-11 AS build
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y curl
5 | RUN update-ca-certificates -f
6 |
7 | WORKDIR /usr/src/app
8 | RUN curl -o opentracing-specialagent-1.7.0.jar https://repo1.maven.org/maven2/io/opentracing/contrib/specialagent/opentracing-specialagent/1.7.0/opentracing-specialagent-1.7.0.jar
9 | COPY src ./src
10 | COPY pom.xml pom.xml
11 | RUN mvn -f /usr/src/app/pom.xml clean package
12 |
13 | FROM ibmjava:8-jre
14 |
15 | COPY --from=build /usr/src/app/opentracing-specialagent-1.7.0.jar /app/
16 | COPY --from=build /usr/src/app/target/server-1.0-SNAPSHOT.jar /app/
17 |
18 | ENTRYPOINT java -javaagent:/app/opentracing-specialagent-1.7.0.jar \
19 | -Dsa.tracer=lightstep \
20 | -Dls.componentName=$LS_SERVICE_NAME \
21 | -Dls.accessToken=$LS_ACCESS_TOKEN \
22 | -Dls.collectorHost=$LS_COLLECTOR_HOST \
23 | -Dls.metricsUrl=$LS_METRICS_URL \
24 | -Dls.propagator=b3 \
25 | -cp /app/server-1.0-SNAPSHOT.jar \
26 | com.lightstep.examples.server.App
27 |
--------------------------------------------------------------------------------
/java/server/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: build clean
2 |
3 | specialagent_url := https://repo1.maven.org/maven2/io/opentracing/contrib/specialagent/opentracing-specialagent/1.7.0/opentracing-specialagent-1.7.0.jar
4 |
5 | otel_auto_url := https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar
6 |
7 | build: opentracing-specialagent-1.7.0.jar
8 | mvn package
9 |
10 | opentracing-specialagent-1.7.0.jar:
11 | wget ${specialagent_url}
12 |
13 | build-otel: otel-latest
14 | mvn package
15 |
16 | otel-latest:
17 | wget ${otel_auto_url}
18 |
19 | clean:
20 | mvn clean
21 |
--------------------------------------------------------------------------------
/java/server/src/main/java/com/lightstep/examples/server/App.java:
--------------------------------------------------------------------------------
1 | package com.lightstep.examples.server;
2 |
3 | import org.eclipse.jetty.server.Server;
4 | import org.eclipse.jetty.server.Handler;
5 | import org.eclipse.jetty.server.handler.ContextHandlerCollection;
6 |
7 | public class App
8 | {
9 | public static void main( String[] args )
10 | throws Exception
11 | {
12 | ContextHandlerCollection handlers = new ContextHandlerCollection();
13 | handlers.setHandlers(new Handler[] {
14 | new ApiContextHandler(),
15 | });
16 | Server server = new Server(8083);
17 | server.setHandler(handlers);
18 |
19 | server.start();
20 | server.dumpStdErr();
21 | server.join();
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/nodejs/README.md:
--------------------------------------------------------------------------------
1 | # js examples
2 |
3 | ## Environment variables
4 |
5 | Export or add to a .env file
6 |
7 | ```bash
8 | export LS_ACCESS_TOKEN=
9 | ```
10 |
11 | Optionally, set the Lightstep metrics URL:
12 |
13 | ```bash
14 | export LS_METRICS_URL=https://ingest.lightstep.com/metrics
15 | ```
16 |
17 | ## Start the client
18 |
19 | ```bash
20 | docker-compose up
21 | ```
22 |
23 | ## Supported variables
24 |
25 | | Name | Required | Default |
26 | | ------------------------ | -------- | ------------------------------------ |
27 | | LS_ACCESS_TOKEN          | yes      |                                      |
28 | | LS_SERVICE_NAME | yes | |
29 | | LS_METRICS_URL | No | https://ingest.lightstep.com/metrics |
30 |
--------------------------------------------------------------------------------
/nodejs/first-trace.js:
--------------------------------------------------------------------------------
1 | const opentelemetry = require('@opentelemetry/api');
2 | const { NodeTracerProvider } = require('@opentelemetry/node');
3 | const {
4 | SimpleSpanProcessor,
5 | ConsoleSpanExporter,
6 | } = require('@opentelemetry/tracing');
7 |
8 | // Create an exporter for sending span data
9 | const exporter = new ConsoleSpanExporter();
10 |
11 | // Create a provider for activating and tracking spans
12 | const tracerProvider = new NodeTracerProvider();
13 |
14 | // Configure a span processor for the tracer
15 | tracerProvider.addSpanProcessor(new SimpleSpanProcessor(exporter));
16 |
17 | // Register the tracer
18 | tracerProvider.register();
19 |
20 | const tracer = opentelemetry.trace.getTracer();
21 |
22 | const span = tracer.startSpan('foo');
23 | span.setAttribute('platform', 'osx');
24 | span.setAttribute('version', '1.2.3');
25 | span.addEvent('event in foo');
26 |
27 | const childSpan = tracer.startSpan('bar', {
28 | parent: span,
29 | });
30 |
31 | childSpan.end();
32 | span.end();
33 |
--------------------------------------------------------------------------------
/nodejs/otel-vanilla/metrics/README.md:
--------------------------------------------------------------------------------
1 | # OpenTelemetry Metrics Example (Vanilla Setup)
2 |
3 | This example shows how to configure OpenTelemetry JS to export metrics to Cloud Observability without any additional (non-OTel) dependencies.
4 |
5 | ## Installation
6 |
7 | ```
8 | npm i
9 | ```
10 |
11 | ## Run the Application
12 |
13 | - Export your access token as LS_ACCESS_TOKEN
14 |
15 | ```
16 | export LS_ACCESS_TOKEN=
17 | ```
18 |
19 | - Optionally, export a prefix for your metrics. By default the metrics will be prefixed `demo.`
20 |
21 | ```
22 | export METRICS_PREFIX=foo
23 | ```
24 |
25 | - Run the example
26 |
27 | ```
28 | npm run start
29 | ```
30 |
--------------------------------------------------------------------------------
/nodejs/otel-vanilla/metrics/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "otel-js-vanilla-metrics",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "start": "node app.js",
8 | "test": "echo \"Error: no test specified\" && exit 1"
9 | },
10 | "keywords": [],
11 | "author": "",
12 | "license": "ISC",
13 | "dependencies": {
14 | "@opentelemetry/api": "^1.3.0",
15 | "@opentelemetry/exporter-metrics-otlp-proto": "^0.34.0",
16 | "@opentelemetry/resources": "^1.8.0",
17 | "@opentelemetry/sdk-metrics": "^1.8.0",
18 | "@opentelemetry/semantic-conventions": "^1.8.0"
19 | }
20 | }
--------------------------------------------------------------------------------
/nodejs/otel-vanilla/tracing/app.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 | const opentelemetry = require('@opentelemetry/api');
3 | const tracer = opentelemetry.trace.getTracer('otel-js-demo');
4 | let count = 0;
5 |
6 | setInterval(() => {
7 | // start a trace by starting a new span
8 | tracer.startActiveSpan('parent', (parent) => {
9 | // set an attribute
10 | parent.setAttribute('count', count);
11 | // record an event
12 | parent.addEvent(`message: ${count}`);
13 |
14 | // create a child span
15 | const child1 = tracer.startSpan('child-1');
16 | child1.end();
17 |
18 | // create a second child span
19 | const child2 = tracer.startSpan('child-2');
20 | // record an error status on a span
21 | const err = new Error('there was a problem');
22 | child2.setStatus({code: opentelemetry.SpanStatusCode.ERROR, message: err.message});
23 | // record the err as an exception (event) on the span
24 | child2.recordException(err);
25 | child2.end();
26 |
27 | // end the trace
28 | parent.end();
29 | count++;
30 | });
31 | }, 10000);
32 |
--------------------------------------------------------------------------------
/nodejs/otel-vanilla/tracing/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "tracing",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "start": "node index.js"
8 | },
9 | "keywords": [],
10 | "author": "",
11 | "license": "ISC",
12 | "dependencies": {
13 | "@opentelemetry/api": "^1.2.0",
14 | "@opentelemetry/auto-instrumentations-node": "^0.33.1",
15 | "@opentelemetry/exporter-trace-otlp-proto": "^0.33.0",
16 | "@opentelemetry/resources": "^1.7.0",
17 | "@opentelemetry/sdk-node": "^0.33.0",
18 | "@opentelemetry/semantic-conventions": "^1.7.0"
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/nodejs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "nodejs-getting-started",
3 | "version": "2.0.0",
4 | "description": "NodeJS Getting Started",
5 | "main": "first-trace.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1",
8 | "start": "node first-trace.js",
9 | "start:server": "node server.js",
10 | "start:client": "node client.js"
11 | },
12 | "keywords": [],
13 | "author": "",
14 | "license": "ISC",
15 | "dependencies": {
16 | "@opentelemetry/api": "^0.21.0",
17 | "@opentelemetry/exporter-collector": "^0.24.0",
18 | "@opentelemetry/node": "^0.23.0",
19 | "@opentelemetry/plugin-express": "^0.15.0",
20 | "@opentelemetry/plugin-http": "^0.11.0",
21 | "@opentelemetry/plugin-https": "^0.18.2",
22 | "@opentelemetry/tracing": "^0.23.0",
23 | "axios": "^0.27.2",
24 | "express": "^4.17.1"
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/python/opentelemetry/auto_instrumentation/Dockerfile.client:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 |
3 | RUN apk add build-base
4 |
5 | RUN mkdir /app
6 | WORKDIR /app
7 | ADD requirements.txt .
8 | RUN pip install -r requirements.txt
9 |
10 | RUN opentelemetry-bootstrap -a install
11 |
12 | ADD *.py ./
13 | CMD ["opentelemetry-instrument", "/app/client.py", "test"]
14 |
--------------------------------------------------------------------------------
/python/opentelemetry/auto_instrumentation/Dockerfile.server:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 |
3 | RUN apk add build-base
4 |
5 | RUN mkdir /app
6 | WORKDIR /app
7 | ADD requirements.txt .
8 | RUN pip install -r requirements.txt
9 |
10 | RUN opentelemetry-bootstrap -a install
11 |
12 | ADD *.py ./
13 | CMD ["opentelemetry-instrument", "/app/server.py", "test"]
--------------------------------------------------------------------------------
/python/opentelemetry/auto_instrumentation/requirements.txt:
--------------------------------------------------------------------------------
1 | # OTel-specific
2 | opentelemetry-distro
3 | opentelemetry-instrumentation-flask # Auto-instrumentation for Flask
4 | opentelemetry-exporter-otlp
5 |
6 | flask
7 | pymongo
8 | redis
9 | requests
10 | sqlalchemy
11 |
--------------------------------------------------------------------------------
/python/opentelemetry/manual_instrumentation/Dockerfile.client:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 |
3 | RUN apk add build-base
4 |
5 | RUN mkdir /app
6 | WORKDIR /app
7 | ADD requirements.txt .
8 | RUN pip install -r requirements.txt
9 |
10 | ADD *.py ./
11 | CMD ["/app/client.py", "test"]
12 |
--------------------------------------------------------------------------------
/python/opentelemetry/manual_instrumentation/Dockerfile.server:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 |
3 | RUN apk add build-base
4 |
5 | RUN mkdir /app
6 | WORKDIR /app
7 | ADD requirements.txt .
8 | RUN pip install -r requirements.txt
9 |
10 | ADD *.py ./
11 | CMD ["/app/server.py", "test"]
--------------------------------------------------------------------------------
/python/opentelemetry/manual_instrumentation/requirements.txt:
--------------------------------------------------------------------------------
1 | opentelemetry-api
2 | opentelemetry-sdk
3 | opentelemetry-exporter-otlp-proto-grpc
4 | # opentelemetry-instrumentation-flask
5 | # opentelemetry-instrumentation-requests
6 |
7 | flask
8 | pymongo
9 | redis
10 | requests
11 | sqlalchemy
--------------------------------------------------------------------------------
/python/opentracing/Dockerfile.client:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 |
3 | RUN apk add build-base
4 |
5 | RUN mkdir /app
6 | WORKDIR /app
7 | ADD requirements.txt .
8 | RUN pip install -r requirements.txt
9 |
10 | # RUN opentelemetry-bootstrap -a install
11 |
12 | ADD client.py /app/client.py
13 | CMD ["opentelemetry-instrument", "python", "/app/client.py"]
14 |
--------------------------------------------------------------------------------
/python/opentracing/Dockerfile.server:
--------------------------------------------------------------------------------
1 | FROM python:3-alpine3.15
2 |
3 | RUN apk add build-base
4 |
5 | RUN mkdir /app
6 | WORKDIR /app
7 | ADD requirements.txt .
8 | RUN pip install -r requirements.txt
9 |
10 | # RUN opentelemetry-bootstrap -a install
11 |
12 | ADD server.py /app/server.py
13 | CMD ["opentelemetry-instrument", "python", "/app/server.py"]
14 |
--------------------------------------------------------------------------------
/python/opentracing/requirements.txt:
--------------------------------------------------------------------------------
1 | protobuf==3.20.2
2 | flask
3 | pymongo
4 | redis
5 | requests
6 | sqlalchemy
7 | opentelemetry-instrumentation==0.30b1
8 | opentelemetry-launcher==1.8.0
9 | opentelemetry-opentracing-shim
10 | opentelemetry-instrumentation-requests
11 | opentelemetry-instrumentation-flask
12 | setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
13 |
--------------------------------------------------------------------------------
/telegraf/http/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | telegraf:
5 | image: telegraf
6 | volumes:
7 | - ./telegraf:/telegraf
8 | environment:
9 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
10 | configs:
11 | - source: telegraf_conf
12 | target: /etc/telegraf/telegraf.conf
13 | depends_on:
14 | - demosvc
15 | networks:
16 | - integrations
17 |
18 | demosvc:
19 | image: golang:alpine
20 | working_dir: /app
21 | volumes:
22 | - ./app:/app
23 | command: "go run main.go"
24 | networks:
25 | - integrations
26 |
27 | configs:
28 | telegraf_conf:
29 | file: ./telegraf/telegraf.conf
30 |
31 | networks:
32 | integrations:
33 |
--------------------------------------------------------------------------------
/telegraf/http/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | [[inputs.http]]
2 | urls = [
3 | "http://demosvc:8080/heapbasics"
4 | ]
5 | timeout = "10s"
6 |
7 | data_format = "json"
8 | json_name_key = "name"
9 | json_time_key = "timestamp"
10 | json_time_format = "unix"
11 |
12 | # Rename fields: the JSON flattening in demosvc prepends "fields_" to field names
13 | [[processors.rename]]
14 | [[processors.rename.replace]]
15 | field = "fields_idle"
16 | dest = "idle"
17 | [[processors.rename.replace]]
18 | field = "fields_inuse"
19 | dest = "inuse"
20 | [[processors.rename.replace]]
21 | field = "fields_reserved"
22 | dest = "reserved"
23 |
24 | [[outputs.opentelemetry]]
25 | service_address = "ingest.lightstep.com:443"
26 | insecure_skip_verify = true
27 |
28 | [outputs.opentelemetry.headers]
29 | lightstep-access-token = "$LS_ACCESS_TOKEN"
30 |
31 | [[outputs.file]]
32 | files = ["stdout", "/telegraf/out-stream.lp"]
33 | data_format = "influx"
34 |
--------------------------------------------------------------------------------
/telegraf/influxdb-migrate/collector.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | otlp:
3 | protocols:
4 | grpc:
5 |
6 | processors:
7 | metricstransform:
8 | transforms:
9 | - include: ^airSensors_(.*)$$
10 | match_type: regexp
11 | action: update
12 | new_name: sensors.air.$${1}
13 | batch:
14 |
15 | exporters:
16 | otlp:
17 | endpoint: ingest.lightstep.com:443
18 | headers:
19 | "lightstep-access-token": "${LS_ACCESS_TOKEN}"
20 | logging:
21 | loglevel: debug
22 |
23 | service:
24 | telemetry:
25 | metrics:
26 | pipelines:
27 | metrics:
28 | receivers: [otlp]
29 | processors: [metricstransform, batch]
30 | exporters: [logging, otlp]
31 |
--------------------------------------------------------------------------------
/telegraf/influxdb-migrate/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | telegraf:
5 | image: telegraf
6 | volumes:
7 | - ./data:/data
8 | environment:
9 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
10 | configs:
11 | - source: telegraf_conf
12 | target: /etc/telegraf/telegraf.conf
13 | depends_on:
14 | - otel-collector
15 | networks:
16 | - integrations
17 |
18 | otel-collector:
19 | image: otel/opentelemetry-collector-contrib:0.59.0
20 | environment:
21 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
22 | configs:
23 | - source: collector_conf
24 | target: /conf/collector.yml
25 | command: ["--config=/conf/collector.yml"]
26 | networks:
27 | - integrations
28 |
29 | configs:
30 | telegraf_conf:
31 | file: ./telegraf.conf
32 | collector_conf:
33 | file: ./collector.yml
34 |
35 | networks:
36 | integrations:
37 |
--------------------------------------------------------------------------------
/telegraf/influxdb-migrate/telegraf.conf:
--------------------------------------------------------------------------------
1 | [[inputs.directory_monitor]]
2 | directory = "/data/in"
3 | finished_directory = "/data/done"
4 | data_format = "influx"
5 |
6 | [[outputs.opentelemetry]]
7 | service_address = "otel-collector:4317"
8 | # insecure_skip_verify = true
9 |
10 | [[outputs.file]]
11 | data_format = "json"
12 | files = ["stdout", "/data/metrics-out.json"]
13 |
--------------------------------------------------------------------------------
/telegraf/monit/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | [[inputs.monit]]
2 | address = "http://localhost:2812"
3 | username = "admin"
4 | password = "monit"
5 |
6 | [[outputs.opentelemetry]]
7 | service_address = "ingest.lightstep.com:443"
8 | insecure_skip_verify = true
9 |
10 | [outputs.opentelemetry.headers]
11 | lightstep-access-token = "$LS_ACCESS_TOKEN"
12 |
13 | [[outputs.file]]
14 | files = ["stdout", "telegraf/out-stream.json"]
15 | data_format = "json"
16 |
17 |
--------------------------------------------------------------------------------
/telegraf/mqtt/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | services:
4 | broker:
5 | image: eclipse-mosquitto
6 | user: mosquitto
7 | volumes:
8 | - ./mosquitto:/mosquitto
9 |
10 | client:
11 | image: eclipse-mosquitto
12 | user: mosquitto
13 | volumes:
14 | - ./mosquitto:/mosquitto
15 | depends_on:
16 | - broker
17 |
18 | telegraf:
19 | container_name: telegraf
20 | image: telegraf
21 | volumes:
22 | - ./telegraf:/telegraf
23 | environment:
24 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
25 | configs:
26 | - source: telegraf_conf
27 | target: /etc/telegraf/telegraf.conf
28 | depends_on:
29 | - broker
30 |
31 | configs:
32 | telegraf_conf:
33 | file: ./telegraf/telegraf.conf
34 |
35 | networks:
36 | integrations:
37 |
--------------------------------------------------------------------------------
/telegraf/mqtt/mosquitto/config/mosquitto.conf:
--------------------------------------------------------------------------------
1 | persistence true
2 | # persistence_location /mosquitto/data/
3 | # persistence_file mosquitto.db
4 |
5 | listener 1883
6 | allow_anonymous true
7 |
8 | log_dest file /mosquitto/log/mosquitto.log
9 | log_dest stdout
10 |
11 |
12 |
--------------------------------------------------------------------------------
/telegraf/mqtt/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | [[inputs.mqtt_consumer]]
2 | servers = [
3 | "tcp://broker:1883"
4 | ]
5 | topics = [
6 | "test/topic"
7 | ]
8 | data_format = "json"
9 |
10 | [[outputs.file]]
11 | files = ["stdout", "/telegraf/out-stream.json"]
12 | data_format = "json"
13 |
14 | [[outputs.opentelemetry]]
15 | service_address = "ingest.lightstep.com:443"
16 |   # TLS verification is skipped here for demo purposes; configure proper TLS keys in production
17 | insecure_skip_verify = true
18 |
19 | # Additional gRPC request metadata
20 | [outputs.opentelemetry.headers]
21 | lightstep-access-token = "$LS_ACCESS_TOKEN"
22 |
--------------------------------------------------------------------------------
/telegraf/net_response/app/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "net"
7 | )
8 |
9 | func respond(conn *net.UDPConn, addr *net.UDPAddr) {
10 | var err error
11 | // 2/3 times responds "up" and rest "down"
12 | if rand.Intn(3) != 0 {
13 | _, err = conn.WriteToUDP([]byte("up"), addr)
14 | } else {
15 | _, err = conn.WriteToUDP([]byte("down"), addr)
16 | }
17 | if err != nil {
18 | fmt.Printf("Couldn't send response %v", err)
19 | }
20 | }
21 |
22 | func main() {
23 | p := make([]byte, 2048)
24 | addr := net.UDPAddr{
25 | Port: 9876,
26 | IP: net.ParseIP("0.0.0.0"),
27 | }
28 | conn, err := net.ListenUDP("udp", &addr)
29 | if err != nil {
30 | fmt.Printf("Some error %v\n", err)
31 | return
32 |
33 | }
34 | for {
35 | _, remoteaddr, err := conn.ReadFromUDP(p)
36 | if err != nil {
37 | fmt.Printf("Could not read. remoteaddr: %v - %v", remoteaddr, err)
38 | continue
39 | }
40 | go respond(conn, remoteaddr)
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/telegraf/net_response/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | telegraf:
5 | image: telegraf
6 | volumes:
7 | - ./telegraf:/telegraf
8 | environment:
9 | LS_ACCESS_TOKEN: ${LS_ACCESS_TOKEN}
10 | configs:
11 | - source: telegraf_conf
12 | target: /etc/telegraf/telegraf.conf
13 | depends_on:
14 | - demosvc
15 | networks:
16 | - integrations
17 |
18 | demosvc:
19 | image: golang:alpine
20 | working_dir: /app
21 | volumes:
22 | - ./app:/app
23 | command: "go run main.go"
24 | networks:
25 | - integrations
26 |
27 | configs:
28 | telegraf_conf:
29 | file: ./telegraf/telegraf.conf
30 |
31 | networks:
32 | integrations:
33 |
--------------------------------------------------------------------------------
/telegraf/net_response/telegraf/telegraf.conf:
--------------------------------------------------------------------------------
1 | [[inputs.net_response]]
2 | protocol = "udp"
3 | address = "demosvc:9876"
4 | send = "yolo"
5 | expect = "up"
6 | timeout = "2s"
7 |
8 | [[outputs.opentelemetry]]
9 | service_address = "ingest.lightstep.com:443"
10 | insecure_skip_verify = true
11 |
12 | [outputs.opentelemetry.headers]
13 | lightstep-access-token = "$LS_ACCESS_TOKEN"
14 |
15 | [[outputs.file]]
16 | files = ["stdout", "/telegraf/out-stream.json"]
17 | data_format = "json"
18 |
19 |
--------------------------------------------------------------------------------
/tools/integration/Dockerfile:
--------------------------------------------------------------------------------
1 | # Integration-test image: installs the pinned Python dependencies and runs
2 | # the pytest suite under OpenTelemetry auto-instrumentation.
3 | FROM python:3-alpine3.15
4 |
5 | # COPY is preferred over ADD for plain local files (no URL fetching or
6 | # archive auto-extraction is needed here).
7 | COPY requirements.txt requirements.txt
8 | RUN pip install -r requirements.txt
9 |
10 | COPY test.py /app/test.py
11 | COPY generate_config.py /app/generate_config.py
12 | CMD ["opentelemetry-instrument", "pytest", "/app/test.py"]
10 |
--------------------------------------------------------------------------------
/tools/integration/requirements.txt:
--------------------------------------------------------------------------------
1 | opentelemetry-launcher==1.8.0
2 | protobuf==3.20.2
3 | pyyaml
4 | pytest
5 | requests
6 | retry
7 | opentelemetry-instrumentation==0.41b0
8 | opentelemetry-instrumentation-requests
9 |
--------------------------------------------------------------------------------
/tools/precommit.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Regenerate derived configuration via the config-generator service, then
4 | # verify the working tree is still clean. A dirty tree means generated
5 | # files were committed without re-running this script.
6 | # Abort early if generation itself fails, instead of silently checking a
7 | # half-generated tree. (git diff --quiet below is inside an `if`, so its
8 | # nonzero "dirty" status is not affected by -e.)
9 | set -e
10 |
11 | docker-compose up config-generator
12 |
13 | if ! git diff --quiet; then
14 |     echo;
15 |     echo 'Working tree is not clean, did you forget to run "./tools/precommit.sh"?';
16 |     echo;
17 |     git status;
18 |     exit 1;
19 | fi
--------------------------------------------------------------------------------
/tools/templates/config-prometheus.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | prometheus/component:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | scrape_configs:
7 | - job_name: 'component-scraper'
8 | scrape_interval: 5s
9 | metrics_path: "/metrics/per-object"
10 | static_configs:
11 | - targets: [":15692"]
12 | exporters:
13 | logging:
14 | logLevel: debug
15 | otlp:
16 | endpoint: ingest.lightstep.com:443
17 | headers:
18 |       lightstep-access-token: ${LIGHTSTEP_ACCESS_TOKEN}
19 | service:
20 | pipelines:
21 | metrics:
22 |       receivers: [prometheus/component]
23 | exporters: [logging, otlp]
24 |
--------------------------------------------------------------------------------
/tools/templates/config.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | component:
3 | use_start_time_metric: false
4 | start_time_metric_regex: '^(.+_)*process_start_time_seconds$'
5 | config:
6 | exporters:
7 | logging:
8 | logLevel: debug
9 | otlp:
10 | endpoint: ingest.lightstep.com:443
11 | headers:
12 |       lightstep-access-token: ${LIGHTSTEP_ACCESS_TOKEN}
13 | service:
14 | pipelines:
15 | metrics:
16 | receivers: [component]
17 | exporters: [logging, otlp]
18 |
--------------------------------------------------------------------------------
/tools/templates/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.2"
2 | services:
3 | component:
4 |
5 | exporters:
6 | logging:
7 | logLevel: debug
8 | otlp:
9 | endpoint: ingest.lightstep.com:443
10 | headers:
11 |         lightstep-access-token: ${LIGHTSTEP_ACCESS_TOKEN}
12 | service:
13 | pipelines:
14 | metrics:
15 | receivers: [component]
16 | exporters: [logging, otlp]
17 |
--------------------------------------------------------------------------------
/tools/update-token.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Added ".bak" per this StackExchange post: https://unix.stackexchange.com/a/92907
4 |
5 | cp ./config/example.env .env
6 | sed -i.bak "s##${TOKEN}#" .env
7 | sed -i.bak "s##${ORG_NAME}#" .env
8 | sed -i.bak "s##${PROJECT_NAME}#" .env
9 | sed -i.bak "s##${API_KEY}#" .env
10 | cp ./config/example-collector-config.yaml ./config/collector-config.yaml
11 | sed -i.bak "s##${TOKEN}#" ./config/collector-config.yaml
12 |
--------------------------------------------------------------------------------