├── .chainsaw.yaml ├── .chloggen ├── TEMPLATE.yaml ├── fix_opampbridge.yaml ├── issue_4071.yaml └── operator33.yaml ├── .ci └── create-release-github.sh ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.yaml │ ├── feature_request.yaml │ └── other.yaml ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── auto-update-agent.yaml │ ├── changelog.yaml │ ├── check_links_config.json │ ├── continuous-integration.yaml │ ├── dependency-review.yml │ ├── e2e.yaml │ ├── fossa.yml │ ├── ossf-scorecard.yml │ ├── publish-autoinstrumentation-apache-httpd.yaml │ ├── publish-autoinstrumentation-dotnet.yaml │ ├── publish-autoinstrumentation-java.yaml │ ├── publish-autoinstrumentation-nodejs.yaml │ ├── publish-autoinstrumentation-php.yaml │ ├── publish-autoinstrumentation-python.yaml │ ├── publish-images.yaml │ ├── publish-must-gather.yaml │ ├── publish-operator-bundle.yaml │ ├── publish-operator-hub.yaml │ ├── publish-operator-opamp-bridge.yaml │ ├── publish-target-allocator.yaml │ ├── publish-test-e2e-images.yaml │ ├── release.yaml │ ├── reusable-operator-hub-release.yaml │ ├── reusable-publish-test-e2e-images.yaml │ ├── scorecard.yaml │ └── shellcheck.yaml ├── .gitignore ├── .golangci.yaml ├── .linkspector.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── DEBUG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── RELEASE.md ├── apis ├── v1alpha1 │ ├── allocation_strategy.go │ ├── convert.go │ ├── convert_test.go │ ├── groupversion_info.go │ ├── ingress_type.go │ ├── instrumentation_types.go │ ├── instrumentation_webhook.go │ ├── instrumentation_webhook_test.go │ ├── mode.go │ ├── opampbridge_capabilities.go │ ├── opampbridge_types.go │ ├── opampbridge_webhook.go │ ├── opampbridge_webhook_test.go │ ├── opentelemetrycollector_types.go │ ├── propagators.go │ ├── samplers.go │ ├── targetallocator_types.go │ ├── targetallocator_webhook.go │ ├── targetallocator_webhook_test.go │ ├── upgrade_strategy.go │ └── zz_generated.deepcopy.go └── v1beta1 │ ├── collector_webhook.go │ ├── collector_webhook_test.go │ ├── common.go │ ├── config.go │ ├── config_test.go │ ├── groupversion_info.go │ ├── helpers.go │ ├── helpers_test.go │ ├── ingress.go │ ├── metrics.go │ ├── metrics_test.go │ ├── mode.go │ ├── opentelemetrycollector_types.go │ ├── targetallocator_rbac.go │ ├── targetallocator_types.go │ ├── testdata │ ├── issue-3452.yaml │ ├── otelcol-connectors.yaml │ ├── otelcol-couchbase.yaml │ ├── otelcol-demo.yaml │ ├── otelcol-extensions.yaml │ ├── otelcol-filelog.yaml │ ├── otelcol-k8sevents.yaml │ ├── otelcol-null-values.yaml │ └── otelcol-pipelines.yaml │ ├── upgrade_strategy.go │ └── zz_generated.deepcopy.go ├── autoinstrumentation ├── apache-httpd │ ├── Dockerfile │ ├── README.md │ └── version.txt ├── dotnet │ ├── Dockerfile │ └── version.txt ├── java │ ├── Dockerfile │ └── version.txt ├── nodejs │ ├── .dockerignore │ ├── Dockerfile │ ├── package.json │ └── tsconfig.json ├── php │ ├── Dockerfile │ ├── composer_for_PHP_8.2_and_later.json │ ├── composer_for_PHP_before_8.2.json │ ├── prepare_files_for_docker_image.sh │ └── version.txt └── python │ ├── Dockerfile │ └── requirements.txt ├── bundle ├── community │ ├── bundle.Dockerfile │ ├── manifests │ │ ├── opentelemetry-operator-controller-manager-metrics-service_v1_service.yaml │ │ ├── opentelemetry-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml │ │ ├── opentelemetry-operator-webhook-service_v1_service.yaml │ │ ├── opentelemetry-operator.clusterserviceversion.yaml │ │ ├── 
opentelemetry.io_instrumentations.yaml │ │ ├── opentelemetry.io_opampbridges.yaml │ │ ├── opentelemetry.io_opentelemetrycollectors.yaml │ │ └── opentelemetry.io_targetallocators.yaml │ ├── metadata │ │ └── annotations.yaml │ └── tests │ │ └── scorecard │ │ └── config.yaml └── openshift │ ├── bundle.Dockerfile │ ├── manifests │ ├── opentelemetry-operator-controller-manager-metrics-service_v1_service.yaml │ ├── opentelemetry-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml │ ├── opentelemetry-operator-prometheus-rules_monitoring.coreos.com_v1_prometheusrule.yaml │ ├── opentelemetry-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml │ ├── opentelemetry-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml │ ├── opentelemetry-operator-webhook-service_v1_service.yaml │ ├── opentelemetry-operator.clusterserviceversion.yaml │ ├── opentelemetry.io_instrumentations.yaml │ ├── opentelemetry.io_opampbridges.yaml │ ├── opentelemetry.io_opentelemetrycollectors.yaml │ └── opentelemetry.io_targetallocators.yaml │ ├── metadata │ └── annotations.yaml │ └── tests │ └── scorecard │ └── config.yaml ├── cmd ├── gather │ ├── Dockerfile │ ├── README.md │ ├── cluster │ │ ├── cluster.go │ │ ├── cluster_test.go │ │ ├── write.go │ │ └── write_test.go │ ├── config │ │ └── config.go │ └── main.go ├── operator-opamp-bridge │ ├── Dockerfile │ ├── README.md │ ├── internal │ │ ├── agent │ │ │ ├── agent.go │ │ │ ├── agent_test.go │ │ │ ├── kube_resource_key.go │ │ │ ├── kube_resource_key_test.go │ │ │ └── testdata │ │ │ │ ├── agent.yaml │ │ │ │ ├── agentbasiccomponentsallowed.yaml │ │ │ │ ├── agentbatchnotallowed.yaml │ │ │ │ ├── agenthttpbasic.yaml │ │ │ │ ├── agentnoprocessorsallowed.yaml │ │ │ │ ├── basic.yaml │ │ │ │ ├── invalid.yaml │ │ │ │ └── updated.yaml │ │ ├── config │ │ │ ├── config.go │ │ │ ├── config_test.go │ │ │ ├── flags.go │ │ │ ├── flags_test.go │ │ │ ├── headers.go │ │ │ └── testdata │ │ │ │ ├── agent.yaml │ │ │ │ ├── agentbadconf.yaml │ │ │ │ ├── agentbasiccomponentsallowed.yaml │ │ │ │ ├── agenthttpbasic.yaml │ │ │ │ ├── agentwithdescription.yaml │ │ │ │ ├── agentwithheaders.yaml │ │ │ │ └── kubeconfig.yaml │ │ ├── logger │ │ │ └── logger.go │ │ ├── metrics │ │ │ └── reporter.go │ │ ├── operator │ │ │ ├── client.go │ │ │ ├── client_test.go │ │ │ └── testdata │ │ │ │ ├── collector-v1alpha1.yaml │ │ │ │ ├── collector.yaml │ │ │ │ ├── invalid-collector.yaml │ │ │ │ ├── reporting-collector.yaml │ │ │ │ ├── unmanaged-collector.yaml │ │ │ │ └── updated-collector.yaml │ │ └── proxy │ │ │ ├── agent.go │ │ │ ├── agent_test.go │ │ │ ├── server.go │ │ │ └── server_test.go │ └── main.go └── otel-allocator │ ├── Dockerfile │ ├── README.md │ ├── benchmark_test.go │ ├── internal │ ├── allocation │ │ ├── allocator.go │ │ ├── allocator_test.go │ │ ├── consistent_hashing.go │ │ ├── consistent_hashing_test.go │ │ ├── least_weighted.go │ │ ├── least_weighted_test.go │ │ ├── per_node.go │ │ ├── per_node_test.go │ │ ├── strategy.go │ │ ├── strategy_test.go │ │ └── testutils.go │ ├── collector │ │ ├── collector.go │ │ └── collector_test.go │ ├── config │ │ ├── config.go │ │ ├── config_test.go │ │ ├── flags.go │ │ ├── flags_test.go │ │ └── testdata │ │ │ ├── config_test.yaml │ │ │ ├── file_sd_test.json │ │ │ ├── global_config_test.yaml │ │ │ ├── no_config.yaml │ │ │ ├── pod_service_selector_camelcase_expressions_test.yaml │ │ │ ├── pod_service_selector_camelcase_test.yaml │ │ │ ├── pod_service_selector_expressions_test.yaml │ │ │ └── pod_service_selector_test.yaml │ ├── diff │ │ ├── 
diff.go │ │ └── diff_test.go │ ├── prehook │ │ ├── prehook.go │ │ ├── relabel.go │ │ └── relabel_test.go │ ├── server │ │ ├── bench_test.go │ │ ├── mocks_test.go │ │ ├── server.go │ │ ├── server_test.go │ │ └── testdata │ │ │ ├── prom-config-all-actions.yaml │ │ │ ├── prom-config-test.yaml │ │ │ └── prom-no-config.yaml │ ├── target │ │ ├── discovery.go │ │ ├── discovery_test.go │ │ ├── target.go │ │ └── testdata │ │ │ ├── test.yaml │ │ │ └── test_update.yaml │ └── watcher │ │ ├── promOperator.go │ │ ├── promOperator_test.go │ │ └── watcher.go │ └── main.go ├── config ├── README.md ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ ├── opentelemetry.io_instrumentations.yaml │ │ ├── opentelemetry.io_opampbridges.yaml │ │ ├── opentelemetry.io_opentelemetrycollectors.yaml │ │ └── opentelemetry.io_targetallocators.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_opampbridges.yaml │ │ ├── cainjection_in_opentelemetrycollectors.yaml │ │ ├── cainjection_in_targetallocators.yaml │ │ └── webhook_in_opentelemetrycollectors.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── community │ │ ├── bases │ │ │ └── opentelemetry-operator.clusterserviceversion.yaml │ │ └── kustomization.yaml │ ├── kustomization.yaml │ └── openshift │ │ ├── bases │ │ └── opentelemetry-operator.clusterserviceversion.yaml │ │ └── kustomization.yaml ├── overlays │ └── openshift │ │ ├── kustomization.yaml │ │ ├── manager-patch.yaml │ │ ├── manager_auth_proxy_tls_patch.yaml │ │ └── metrics_service_tls_patch.yaml ├── rbac │ ├── _opampbridge_editor_role.yaml │ ├── _opampbridge_viewer_role.yaml │ ├── _targetallocator_editor_role.yaml │ ├── _targetallocator_viewer_role.yaml │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── opentelemetrycollector_editor_role.yaml │ ├── opentelemetrycollector_viewer_role.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── samples │ ├── _v1alpha1_opampbridge.yaml │ ├── _v1alpha1_targetallocator.yaml │ ├── core_v1alpha1_opentelemetrycollector.yaml │ ├── core_v1beta1_opentelemetrycollector.yaml │ ├── exporter-with-tls.yaml │ ├── instrumentation_v1alpha1_instrumentation.yaml │ ├── kustomization.yaml │ └── sidecar.yaml ├── scorecard │ ├── bases │ │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ │ ├── basic.config.yaml │ │ └── olm.config.yaml └── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ ├── manifests.yaml │ └── service.yaml ├── docs ├── api │ ├── README.md │ ├── instrumentations.md │ ├── opampbridges.md │ ├── opentelemetrycollectors.md │ └── targetallocators.md ├── compatibility.md ├── crd-changelog.md └── rfcs │ ├── README.md │ ├── managed.md │ └── template.md ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt ├── check-operator-ready.go ├── ignore-createdAt-bundle.sh ├── install-metrics-server.sh ├── install-prometheus-operator.sh └── install-targetallocator-prometheus-crds.sh ├── internal ├── autodetect │ ├── autodetectutils │ │ └── utils.go │ ├── certmanager │ │ ├── check.go │ │ └── operator.go │ ├── collector │ │ └── operator.go │ ├── fips │ │ └── fipsautodetect.go │ ├── main.go │ ├── 
main_test.go │ ├── opampbridge │ │ └── operator.go │ ├── openshift │ │ └── routes.go │ ├── prometheus │ │ └── operator.go │ ├── rbac │ │ ├── check.go │ │ └── operator.go │ └── targetallocator │ │ └── operator.go ├── components │ ├── builder.go │ ├── builder_test.go │ ├── component.go │ ├── component_test.go │ ├── exporters │ │ ├── helpers.go │ │ └── helpers_test.go │ ├── extensions │ │ ├── healthcheckv1.go │ │ ├── healthcheckv1_test.go │ │ ├── helpers.go │ │ ├── helpers_test.go │ │ ├── jaeger_query_extension.go │ │ └── jaeger_query_extension_test.go │ ├── generic_parser.go │ ├── generic_parser_test.go │ ├── multi_endpoint.go │ ├── multi_endpoint_test.go │ ├── processors │ │ ├── helpers.go │ │ ├── helpers_test.go │ │ ├── k8sattribute.go │ │ ├── k8sattribute_test.go │ │ ├── resourcedetection.go │ │ └── resourcedetection_test.go │ ├── receivers │ │ ├── helpers.go │ │ ├── k8scluster.go │ │ ├── k8scluster_test.go │ │ ├── k8sevents.go │ │ ├── k8sobjects.go │ │ ├── k8sobjects_test.go │ │ ├── kubeletstats.go │ │ ├── kubeletstats_test.go │ │ ├── multi_endpoint_receiver_test.go │ │ ├── scraper_test.go │ │ └── single_endpoint_receiver_test.go │ ├── single_endpoint.go │ └── single_endpoint_test.go ├── config │ ├── config.go │ ├── config_test.go │ ├── options.go │ ├── tls.go │ └── tls_test.go ├── controllers │ ├── builder_test.go │ ├── common.go │ ├── opampbridge_controller.go │ ├── opampbridge_controller_test.go │ ├── opentelemetrycollector_controller.go │ ├── opentelemetrycollector_reconciler_test.go │ ├── reconcile_test.go │ ├── suite_test.go │ ├── targetallocator_controller.go │ ├── targetallocator_controller_test.go │ ├── targetallocator_reconciler_test.go │ └── testdata │ │ ├── ingress_testdata.yaml │ │ ├── otlp_test.yaml │ │ ├── test.yaml │ │ └── test_ta_update.yaml ├── fips │ ├── fipscheck.go │ └── fipscheck_test.go ├── manifests │ ├── builder.go │ ├── collector │ │ ├── adapters │ │ │ ├── config_from.go │ │ │ └── config_from_test.go │ │ ├── collector.go │ │ ├── collector_test.go │ │ ├── config_replace.go │ │ ├── config_replace_test.go │ │ ├── configmap.go │ │ ├── configmap_test.go │ │ ├── container.go │ │ ├── container_test.go │ │ ├── daemonset.go │ │ ├── daemonset_test.go │ │ ├── deployment.go │ │ ├── deployment_test.go │ │ ├── horizontalpodautoscaler.go │ │ ├── horizontalpodautoscaler_test.go │ │ ├── ingress.go │ │ ├── ingress_test.go │ │ ├── poddisruptionbudget.go │ │ ├── poddisruptionbudget_test.go │ │ ├── podmonitor.go │ │ ├── podmonitor_test.go │ │ ├── rbac.go │ │ ├── rbac_test.go │ │ ├── route.go │ │ ├── route_test.go │ │ ├── service.go │ │ ├── service_test.go │ │ ├── serviceaccount.go │ │ ├── serviceaccount_test.go │ │ ├── servicemonitor.go │ │ ├── servicemonitor_test.go │ │ ├── statefulset.go │ │ ├── statefulset_test.go │ │ ├── suite_test.go │ │ ├── targetallocator.go │ │ ├── targetallocator_test.go │ │ ├── testdata │ │ │ ├── config_expected_targetallocator.yaml │ │ │ ├── http_sd_config_servicemonitor_test.yaml │ │ │ ├── http_sd_config_servicemonitor_test_ta_set.yaml │ │ │ ├── http_sd_config_ta_test.yaml │ │ │ ├── http_sd_config_test.yaml │ │ │ ├── ingress_testdata.yaml │ │ │ ├── pm_crd.go │ │ │ ├── prometheus-exporter.yaml │ │ │ ├── rbac_resourcedetectionprocessor_k8s.yaml │ │ │ ├── rbac_resourcedetectionprocessor_openshift.yaml │ │ │ ├── relabel_config_expected_with_sd_config.yaml │ │ │ ├── relabel_config_original.yaml │ │ │ ├── route_crd.go │ │ │ ├── sm_crd.go │ │ │ └── test.yaml │ │ ├── volume.go │ │ ├── volume_test.go │ │ ├── volumeclaim.go │ │ └── volumeclaim_test.go │ ├── 
manifestutils │ │ ├── annotations.go │ │ ├── annotations_test.go │ │ ├── dns.go │ │ ├── labels.go │ │ └── labels_test.go │ ├── mutate.go │ ├── mutate_test.go │ ├── opampbridge │ │ ├── annotations.go │ │ ├── annotations_test.go │ │ ├── configmap.go │ │ ├── configmap_test.go │ │ ├── container.go │ │ ├── container_test.go │ │ ├── deployment.go │ │ ├── deployment_test.go │ │ ├── opampbridge.go │ │ ├── service.go │ │ ├── serviceaccount.go │ │ ├── serviceaccount_test.go │ │ ├── volume.go │ │ └── volume_test.go │ ├── params.go │ └── targetallocator │ │ ├── adapters │ │ ├── config_to_prom_config.go │ │ └── config_to_prom_config_test.go │ │ ├── annotations.go │ │ ├── annotations_test.go │ │ ├── certificate.go │ │ ├── certificate_test.go │ │ ├── configmap.go │ │ ├── configmap_test.go │ │ ├── container.go │ │ ├── container_test.go │ │ ├── deployment.go │ │ ├── deployment_test.go │ │ ├── issuer.go │ │ ├── issuer_test.go │ │ ├── poddisruptionbudget.go │ │ ├── poddisruptionbudget_test.go │ │ ├── service.go │ │ ├── service_test.go │ │ ├── serviceaccount.go │ │ ├── serviceaccount_test.go │ │ ├── servicemonitor.go │ │ ├── servicemonitor_test.go │ │ ├── targetallocator.go │ │ ├── testdata │ │ └── test.yaml │ │ ├── volume.go │ │ └── volume_test.go ├── naming │ ├── dns.go │ ├── dns_test.go │ ├── main.go │ ├── port.go │ ├── port_test.go │ ├── triming.go │ └── triming_test.go ├── openshift │ └── dashboards │ │ ├── dashboards.go │ │ └── metrics-dashboard.json ├── operator-metrics │ ├── metrics.go │ └── metrics_test.go ├── rbac │ ├── access.go │ ├── access_test.go │ ├── format.go │ └── format_test.go ├── status │ ├── collector │ │ ├── collector.go │ │ ├── collector_test.go │ │ └── handle.go │ ├── opampbridge │ │ └── handle.go │ └── targetallocator │ │ └── handle.go ├── version │ ├── main.go │ └── main_test.go └── webhook │ └── podmutation │ ├── webhookhandler.go │ ├── webhookhandler_suite_test.go │ └── webhookhandler_test.go ├── kind-1.23.yaml ├── kind-1.24.yaml ├── kind-1.25.yaml ├── kind-1.26.yaml ├── kind-1.27.yaml ├── kind-1.28.yaml ├── kind-1.29.yaml ├── kind-1.30.yaml ├── kind-1.31.yaml ├── kind-1.32.yaml ├── kind-1.33.yaml ├── main.go ├── pkg ├── collector │ └── upgrade │ │ ├── noop.go │ │ ├── suite_test.go │ │ ├── testdata │ │ ├── v0_61_0-invalid.yaml │ │ └── v0_61_0-valid.yaml │ │ ├── upgrade.go │ │ ├── upgrade_test.go │ │ ├── v0_104_0.go │ │ ├── v0_104_0_test.go │ │ ├── v0_105_0.go │ │ ├── v0_105_0_test.go │ │ ├── v0_110_0.go │ │ ├── v0_110_0_test.go │ │ ├── v0_111_0.go │ │ ├── v0_111_0_test.go │ │ ├── v0_122_0.go │ │ ├── v0_122_0_test.go │ │ ├── v0_15_0.go │ │ ├── v0_15_0_test.go │ │ ├── v0_19_0.go │ │ ├── v0_19_0_test.go │ │ ├── v0_24_0.go │ │ ├── v0_24_0_test.go │ │ ├── v0_2_10.go │ │ ├── v0_31_0.go │ │ ├── v0_31_0_test.go │ │ ├── v0_36_0.go │ │ ├── v0_36_0_test.go │ │ ├── v0_38_0.go │ │ ├── v0_38_0_test.go │ │ ├── v0_39_0.go │ │ ├── v0_39_0_test.go │ │ ├── v0_41_0.go │ │ ├── v0_41_0_test.go │ │ ├── v0_43_0.go │ │ ├── v0_43_0_test.go │ │ ├── v0_56_0.go │ │ ├── v0_56_0_test.go │ │ ├── v0_57_2.go │ │ ├── v0_57_2_test.go │ │ ├── v0_61_0.go │ │ ├── v0_61_0_test.go │ │ ├── v0_9_0.go │ │ ├── v0_9_0_test.go │ │ └── versions.go ├── constants │ └── env.go ├── featuregate │ ├── featuregate.go │ └── featuregate_test.go ├── instrumentation │ ├── annotation.go │ ├── annotation_test.go │ ├── apachehttpd.go │ ├── apachehttpd_test.go │ ├── dotnet.go │ ├── dotnet_test.go │ ├── exporter.go │ ├── exporter_test.go │ ├── golang.go │ ├── golang_test.go │ ├── helper.go │ ├── helper_test.go │ ├── 
instrumentation_suite_test.go │ ├── javaagent.go │ ├── javaagent_test.go │ ├── nginx.go │ ├── nginx_test.go │ ├── nodejs.go │ ├── nodejs_test.go │ ├── podmutator.go │ ├── podmutator_test.go │ ├── python.go │ ├── python_test.go │ ├── sdk.go │ ├── sdk_test.go │ └── upgrade │ │ ├── upgrade.go │ │ ├── upgrade_suite_test.go │ │ └── upgrade_test.go └── sidecar │ ├── annotation.go │ ├── annotation_test.go │ ├── attributes.go │ ├── attributes_test.go │ ├── pod.go │ ├── pod_test.go │ └── podmutator.go ├── renovate.json ├── tests ├── e2e-automatic-rbac │ ├── extra-permissions-operator │ │ ├── clusterresourcequotas.yaml │ │ ├── cronjobs.yaml │ │ ├── daemonsets.yaml │ │ ├── events.yaml │ │ ├── extensions.yaml │ │ ├── namespaces-status.yaml │ │ ├── namespaces.yaml │ │ ├── nodes-proxy.yaml │ │ ├── nodes-spec.yaml │ │ ├── nodes.yaml │ │ ├── pod-status.yaml │ │ ├── rbac.yaml │ │ ├── replicaset.yaml │ │ ├── replicationcontrollers.yaml │ │ └── resourcequotas.yaml │ ├── processor-k8sattributes │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ └── chainsaw-test.yaml │ ├── processor-resourcedetection │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ └── chainsaw-test.yaml │ ├── receiver-k8scluster │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ └── 02-install.yaml │ ├── receiver-k8sevents │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ ├── receiver-k8sobjects │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ └── receiver-kubeletstats │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-autoscale │ └── autoscale │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ ├── 03-assert.yaml │ │ ├── 04-error.yaml │ │ ├── 04-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-crd-validations │ └── sidecar │ │ ├── 00-install-priority-class.yaml │ │ ├── 01-install-tolerations.yaml │ │ ├── 02-install-affinity.yaml │ │ ├── 03-install-additional-containers.yaml │ │ └── chainsaw-test.yaml ├── e2e-instrumentation │ ├── instrumentation-apache-httpd │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-apache-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-dotnet-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-dotnet-musl │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-dotnet │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-go │ │ ├── 00-install-collector.yaml │ │ ├── 
00-install-instrumentation.yaml │ │ ├── 01-add-scc.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ ├── add-scc.sh │ │ ├── chainsaw-test.yaml │ │ └── scc.yaml │ ├── instrumentation-java-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-java-other-ns │ │ ├── 02-install-collector.yaml │ │ ├── 02-install-instrumentation.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-java-tls │ │ ├── .gitignore │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── ca.yaml │ │ ├── chainsaw-test.yaml │ │ ├── client-secret.yaml │ │ ├── generate-certs.sh │ │ └── server-secret.yaml │ ├── instrumentation-java │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-nginx-contnr-secctx │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-nginx-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-nginx │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-nodejs-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-nodejs-volume │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-nodejs │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-python-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-python-musl │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-python │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ └── instrumentation-sdk │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml ├── e2e-metadata-filters │ ├── annotations │ │ ├── 00-error.yaml │ │ ├── 00-install.yaml │ │ ├── 01-error.yaml │ │ ├── 01-patch.yaml │ │ └── chainsaw-test.yaml │ └── labels │ │ ├── 00-error.yaml │ │ ├── 00-install.yaml │ │ ├── 01-error.yaml │ │ ├── 01-patch.yaml │ │ └── chainsaw-test.yaml ├── e2e-multi-instrumentation │ ├── instrumentation-multi-multicontainer-go │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ 
├── 01-add-scc.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-app.yaml │ │ ├── add-scc.sh │ │ ├── chainsaw-test.yaml │ │ └── scc.yaml │ ├── instrumentation-multi-multicontainer │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── instrumentation-multi-no-containers │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ └── instrumentation-single-instr-first-container │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml ├── e2e-native-sidecar │ ├── 00-assert.yaml │ ├── 00-install.yaml │ └── chainsaw-test.yaml ├── e2e-opampbridge │ └── opampbridge │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-openshift │ ├── Dockerfile │ ├── export-to-cluster-logging-lokistack │ │ ├── chainsaw-test.yaml │ │ ├── check_logs.sh │ │ ├── generate-logs-assert.yaml │ │ ├── generate-logs.yaml │ │ ├── install-loki-assert.yaml │ │ ├── install-loki.yaml │ │ ├── install-minio-assert.yaml │ │ ├── install-minio.yaml │ │ ├── logging-uiplugin-assert.yaml │ │ ├── logging-uiplugin.yaml │ │ ├── otel-collector-assert.yaml │ │ └── otel-collector.yaml │ ├── kafka │ │ ├── 00-assert.yaml │ │ ├── 00-create-kafka-instance.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-create-kafka-topics.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-otel-kakfa-receiver.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-otel-kakfa-exporter.yaml │ │ ├── 04-assert.yaml │ │ ├── 04-generate-traces.yaml │ │ ├── chainsaw-test.yaml │ │ └── check_traces.sh │ ├── monitoring │ │ ├── 00-assert.yaml │ │ ├── 00-workload-monitoring.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-otel-collector.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-generate-telemetry.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-create-monitoring-roles.yaml │ │ ├── 04-assert.yaml │ │ ├── 04-use-prometheus-exporter.yaml │ │ ├── chainsaw-test.yaml │ │ └── check_metrics.sh │ ├── multi-cluster │ │ ├── 00-assert.yaml │ │ ├── 00-create-namespaces.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-create-tempo.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-otlp-receiver.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-otlp-sender.yaml │ │ ├── 04-assert.yaml │ │ ├── 04-generate-traces.yaml │ │ ├── assert-verify-traces.yaml │ │ ├── chainsaw-test.yaml │ │ ├── create_otlp_sender.sh │ │ ├── generate_certs.sh │ │ └── verify-traces.yaml │ ├── must-gather │ │ ├── assert-install-app.yaml │ │ ├── assert-install-target-allocator.yaml │ │ ├── chainsaw-test.yaml │ │ ├── check_must_gather.sh │ │ ├── install-app.yaml │ │ ├── install-collector-sidecar.yaml │ │ ├── install-instrumentation.yaml │ │ └── install-target-allocator.yaml │ ├── otlp-metrics-traces │ │ ├── 00-assert.yaml │ │ ├── 00-install-tempo.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-workload-monitoring.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-otel-metrics-collector.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-metrics-traces-gen.yaml │ │ ├── assert-verify-traces.yaml │ │ ├── chainsaw-test.yaml │ │ ├── check_metrics.sh │ │ ├── check_must_gather.sh │ │ └── verify-traces.yaml │ ├── route │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ └── scrape-in-cluster-monitoring │ │ ├── chainsaw-test.yaml │ │ ├── check_logs.sh │ │ ├── create-clusterrolebinding-assert.yaml │ │ ├── 
create-clusterrolebinding.yaml │ │ ├── create-otel-instance-assert.yaml │ │ └── create-otel-instance.yaml ├── e2e-pdb │ ├── pdb │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ └── target-allocator │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-prometheuscr │ ├── create-pm-prometheus-exporters │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ └── create-sm-prometheus-exporters │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-install.yaml │ │ ├── 04-error.yaml │ │ ├── 04-install.yaml │ │ ├── 05-assert.yaml │ │ ├── 05-error.yaml │ │ ├── 05-install.yaml │ │ ├── 06-assert.yaml │ │ ├── 06-install.yaml │ │ ├── 07-error.yaml │ │ ├── 08-assert.yaml │ │ ├── 08-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-ta-collector-mtls │ ├── certmanager-permissions │ │ └── certmanager.yaml │ ├── ta-collector-mtls │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ └── chainsaw-test.yaml │ └── ta-disabled │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-targetallocator-cr │ ├── 00-assert.yaml │ ├── 00-install.yaml │ ├── 01-assert.yaml │ ├── 01-install.yaml │ ├── chainsaw-test.yaml │ └── targetallocator-label │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-add-ta-label.yaml │ │ ├── 01-assert.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-change-collector-config.yaml │ │ ├── 03-assert.yaml │ │ └── chainsaw-test.yaml ├── e2e-targetallocator │ ├── targetallocator-features │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ └── chainsaw-test.yaml │ ├── targetallocator-kubernetessd │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ ├── targetallocator-namespace │ │ ├── assert-job-failed.yaml │ │ ├── assert-job-succeeded.yaml │ │ ├── assert-workloads-ready.yaml │ │ ├── chainsaw-test.yaml │ │ └── resources │ │ │ ├── clusterrbac.yaml │ │ │ ├── job-check-metrics.yaml │ │ │ ├── otelcol.yaml │ │ │ ├── rbac.yaml │ │ │ ├── serviceaccounts.yaml │ │ │ ├── ta-allow-namespaces.yaml │ │ │ └── ta-deny-namespaces.yaml │ └── targetallocator-prometheuscr │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install.yaml │ │ └── chainsaw-test.yaml ├── e2e-upgrade │ └── upgrade-test │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-upgrade-collector.yaml │ │ ├── chainsaw-test.yaml │ │ └── opentelemetry-operator-v0.86.0.yaml ├── e2e │ ├── additional-containers-collector │ │ ├── 00-assert-daemonset-without-additional-containers.yaml │ │ ├── 00-assert-deployment-without-additional-containers.yaml │ │ ├── 00-assert-statefulset-without-additional-containers.yaml │ │ ├── 00-install-collectors-without-additional-containers.yaml │ │ ├── 01-assert-daemonset-with-additional-containers.yaml │ │ ├── 01-assert-deployment-with-additional-containers.yaml │ │ ├── 01-assert-statefulset-with-additional-containers.yaml │ │ ├── 01-install-collectors-with-additional-containers.yaml │ │ ├── 02-assert-daemonset-with-modified-additional-containers.yaml │ │ ├── 02-assert-deployment-with-modified-additional-containers.yaml │ │ ├── 
02-assert-statefulset-with-modified-additional-containers.yaml │ │ ├── 02-modify-collectors-additional-containers.yaml │ │ └── chainsaw-test.yaml │ ├── affinity-collector │ │ ├── 00-assert-daemonset-without-affinity.yaml │ │ ├── 00-assert-deployment-without-affinity.yaml │ │ ├── 00-assert-statefulset-without-affinity.yaml │ │ ├── 00-install-collectors-without-affinity.yaml │ │ ├── 01-assert-daemonset-with-affinity.yaml │ │ ├── 01-assert-deployment-with-affinity.yaml │ │ ├── 01-assert-statefulset-with-affinity.yaml │ │ ├── 01-install-collectors-with-affinity.yaml │ │ ├── 02-assert-daemonset-with-modified-affinity.yaml │ │ ├── 02-assert-deployment-with-modified-affinity.yaml │ │ ├── 02-assert-statefulset-with-modified-affinity.yaml │ │ ├── 02-modify-collectors-affinity.yaml │ │ └── chainsaw-test.yaml │ ├── annotation-change-collector │ │ ├── 00-assert-daemonset-with-extra-annotation.yaml │ │ ├── 00-assert-deployment-with-extra-annotation.yaml │ │ ├── 00-assert-statefulset-with-extra-annotation.yaml │ │ ├── 00-install-collectors-with-extra-annotation.yaml │ │ ├── 01-assert-daemonset-with-annotation-change.yaml │ │ ├── 01-assert-deployment-with-annotation-change.yaml │ │ ├── 01-assert-statefulset-with-annotation-change.yaml │ │ ├── 01-install-collectors-with-annotation-change.yaml │ │ ├── 02-assert-daemonset-without-extra-annotation.yaml │ │ ├── 02-assert-deployment-without-extra-annotation.yaml │ │ ├── 02-assert-statefulset-without-extra-annotation.yaml │ │ ├── 02-install-collectors-without-extra-annotation.yaml │ │ ├── 02-manual-annotation-resources.yaml │ │ └── chainsaw-test.yaml │ ├── args-collector │ │ ├── 00-assert-daemonset-without-args.yaml │ │ ├── 00-assert-deployment-without-args.yaml │ │ ├── 00-assert-statefulset-without-args.yaml │ │ ├── 00-install-collectors-without-args.yaml │ │ ├── 01-assert-daemonset-with-args.yaml │ │ ├── 01-assert-deployment-with-args.yaml │ │ ├── 01-assert-statefulset-with-args.yaml │ │ ├── 01-install-collectors-with-args.yaml │ │ ├── 02-assert-daemonset-with-modified-args.yaml │ │ ├── 02-assert-deployment-with-modified-args.yaml │ │ ├── 02-assert-statefulset-with-modified-args.yaml │ │ ├── 02-modify-collectors-args.yaml │ │ └── chainsaw-test.yaml │ ├── daemonset-features │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ ├── 03-assert.yaml │ │ ├── add-sa-collector.sh │ │ ├── add-scc-openshift.sh │ │ ├── chainsaw-test.yaml │ │ └── scc.yaml │ ├── env-vars │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-deployment.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-cronjob.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-job.yaml │ │ └── chainsaw-test.yaml │ ├── extension │ │ ├── 00-assert-jaeger-extension.yaml │ │ ├── 00-install-jaeger-extension.yaml │ │ ├── 01-assert-health-check-extension.yaml │ │ ├── 01-install-health-check-extension.yaml │ │ └── chainsaw-test.yaml │ ├── ingress-subdomains │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── ingress │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-error.yaml │ │ ├── 01-remove-ingress.yaml │ │ └── chainsaw-test.yaml │ ├── label-change-collector │ │ ├── 00-assert-daemonset-with-extra-label.yaml │ │ ├── 00-assert-deployment-with-extra-label.yaml │ │ ├── 00-assert-statefulset-with-extra-label.yaml │ │ ├── 00-install-collectors-with-extra-label.yaml │ │ ├── 01-assert-daemonset-with-label-change.yaml │ │ ├── 01-assert-deployment-with-label-change.yaml │ │ ├── 01-assert-statefulset-with-label-change.yaml │ │ ├── 
01-install-collectors-with-label-change.yaml │ │ ├── 02-assert-daemonset-without-extra-label.yaml │ │ ├── 02-assert-deployment-without-extra-label.yaml │ │ ├── 02-assert-statefulset-without-extra-label.yaml │ │ ├── 02-install-collectors-without-extra-label.yaml │ │ ├── 02-manual-labeling-resources.yaml │ │ └── chainsaw-test.yaml │ ├── managed-reconcile │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-disable-reconciliation.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-enable-reconciliation.yaml │ │ └── chainsaw-test.yaml │ ├── multiple-configmaps │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── node-selector-collector │ │ ├── 00-assert-daemonset-without-node-selector.yaml │ │ ├── 00-assert-deployment-without-node-selector.yaml │ │ ├── 00-assert-statefulset-without-node-selector.yaml │ │ ├── 00-install-collectors-without-node-selector.yaml │ │ ├── 01-assert-daemonset-with-node-selector.yaml │ │ ├── 01-assert-deployment-with-node-selector.yaml │ │ ├── 01-assert-statefulset-with-node-selector.yaml │ │ ├── 01-install-collectors-with-node-selector.yaml │ │ └── chainsaw-test.yaml │ ├── operator-metrics │ │ ├── assert-operator.yaml │ │ └── chainsaw-test.yaml │ ├── operator-restart │ │ ├── assert-operator-pod.yaml │ │ └── chainsaw-test.yaml │ ├── prometheus-config-validation │ │ ├── 00-assert.yaml │ │ ├── 00-promreceiver-allocatorconfig.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-promreceiver-labeldrop.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-promreceiver-allocatorconfig-extra.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-promreceiver-nopromconfig.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-daemonset │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-deletion │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-dns-config │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-init-containers │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-ip-families │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-pod-annotations │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-pod-labels │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-ports │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-restarting-deployment │ │ ├── 00-assert.yaml │ │ ├── 00-errors.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert-second-config.yaml │ │ ├── 01-install-second-config.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-shareprocessnamespace │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-sidecar-other-namespace │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-sidecar │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install-app.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-simplest-v1beta1 │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-simplest │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-statefulset │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ └── chainsaw-test.yaml │ ├── smoke-targetallocator │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-change-ta-config.yaml │ │ ├── 
01-error.yaml │ │ └── chainsaw-test.yaml │ ├── statefulset-features │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-update-volume-claim-templates.yaml │ │ └── chainsaw-test.yaml │ ├── use-labels-for-resource-attributes │ │ ├── 00-install-collector.yaml │ │ ├── 00-install-instrumentation.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-deployment.yaml │ │ └── chainsaw-test.yaml │ ├── versioned-configmaps │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-update.yaml │ │ ├── 02-error.yaml │ │ ├── 02-update.yaml │ │ └── chainsaw-test.yaml │ └── volume-claim-label │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-update-volume-claim-template-labels.yaml │ │ └── chainsaw-test.yaml └── test-e2e-apps │ ├── apache-httpd │ └── Dockerfile │ ├── bridge-server │ ├── Dockerfile │ ├── data │ │ ├── agent.go │ │ └── agents.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── opampsrv │ │ ├── logger.go │ │ └── opampsrv.go │ ├── dotnet │ ├── DiceRoller │ │ ├── DiceRoller.csproj │ │ └── Program.cs │ └── Dockerfile │ ├── golang │ ├── Dockerfile │ └── main.go │ ├── java │ ├── DemoApplication.java │ ├── Dockerfile │ └── build.gradle │ ├── metrics-basic-auth │ ├── Dockerfile │ ├── README.md │ ├── app.py │ └── requirements.txt │ ├── nodejs │ ├── Dockerfile │ ├── app.js │ └── package.json │ ├── python │ ├── Dockerfile │ ├── app.py │ └── requirements.txt │ └── scripts │ └── check_pod_logs.sh └── versions.txt

/.chainsaw.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/configuration-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Configuration
metadata:
  name: configuration
spec:
  parallel: 4
  timeouts:
    assert: 6m0s
    cleanup: 5m0s
    delete: 5m0s
    error: 5m0s
    apply: 15s
    exec: 15s
--------------------------------------------------------------------------------
/.ci/create-release-github.sh:
--------------------------------------------------------------------------------
#!/bin/bash

NOTES_FILE=/tmp/notes.md
# Note: We expect the versions to not have the `v` prefix here
# Extract the CHANGELOG.md block that starts at the heading for the version
# being released and ends just before the heading for the previous release
# (the trailing `!p` excludes the previous release's heading itself).
sed -n "/${DESIRED_VERSION}/,/${CURRENT_VERSION}/{/${CURRENT_VERSION}/!p;}" CHANGELOG.md >${NOTES_FILE}

gh config set prompt disabled
# Create a draft GitHub release with the extracted notes and attach the
# installation manifests as release assets.
gh release create \
  -t "Release v${DESIRED_VERSION}" \
  --notes-file ${NOTES_FILE} \
  --draft \
  "v${DESIRED_VERSION}" \
  'dist/opentelemetry-operator.yaml#Installation manifest for Kubernetes' \
  'dist/opentelemetry-operator-openshift.yaml#Installation manifest for OpenShift'
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
#####################################################
#
# List of approvers for OpenTelemetry Collector Kubernetes Operator
#
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/main/community-membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#

# Global owners, will be the owners for everything in the repo.
* @open-telemetry/operator-approvers

# AutoInstrumentation owners
# TBD
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
**Description:**

**Link to tracking Issue(s):**

- Resolves: #issue-number

**Testing:**

**Documentation:**
--------------------------------------------------------------------------------
/.github/workflows/check_links_config.json:
--------------------------------------------------------------------------------
{
  "ignorePatterns": [
    {
      "pattern": "http(s)?://\\d+\\.\\d+\\.\\d+\\.\\d+"
    },
    {
      "pattern": "http(s)?://localhost"
    },
    {
      "pattern": "http(s)?://example.com"
    },
    {
      "pattern": "^#"
    }
  ],
  "aliveStatusCodes": [429, 200]
}
--------------------------------------------------------------------------------
/.github/workflows/fossa.yml:
--------------------------------------------------------------------------------
name: FOSSA scanning

on:
  push:
    branches:
      - main

permissions:
  contents: read

jobs:
  fossa:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
        with:
          api-key: ${{secrets.FOSSA_API_KEY}}
          team: OpenTelemetry
--------------------------------------------------------------------------------
/.linkspector.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/open-telemetry/opentelemetry-operator/5d02ecd3c1f4ad94d9c70b5447f3bccf5dab551e/.linkspector.yml
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Get CA certificates from alpine package repo
FROM alpine:3.22 AS certificates

RUN apk --no-cache add ca-certificates

######## Start a new stage from scratch #######
FROM scratch

ARG TARGETARCH

WORKDIR /

# Copy the certs from Alpine
COPY --from=certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt

# Copy binary built on the host
COPY bin/manager_${TARGETARCH} manager

USER 65532:65532

ENTRYPOINT ["/manager"]
--------------------------------------------------------------------------------
/apis/v1beta1/testdata/issue-3452.yaml:
--------------------------------------------------------------------------------
receivers:
  zipkin:
service:
  pipelines:
    traces:
      receivers: [zipkin]
--------------------------------------------------------------------------------
/apis/v1beta1/testdata/otelcol-connectors.yaml:
--------------------------------------------------------------------------------
receivers:
  foo:

exporters:
  bar:

connectors:
  count:
    spanevents:
      my.prod.event.count:
        description: The number of span events from my prod environment.
        conditions:
          - 'attributes["env"] == "prod"'
          - 'name == "prodevent"'

service:
  pipelines:
    traces:
      receivers: [foo]
      exporters: [count]
    metrics:
      receivers: [count]
      exporters: [bar]
--------------------------------------------------------------------------------
/apis/v1beta1/testdata/otelcol-extensions.yaml:
--------------------------------------------------------------------------------
extensions:
  oauth2client:
    client_id: agent
    client_secret: some-secret
    token_url: http://localhost:8080/auth/realms/opentelemetry/protocol/openid-connect/token

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: localhost:4317

exporters:
  otlp/auth:
    endpoint: remote-collector:4317
    auth:
      authenticator: oauth2client

service:
  extensions:
    - oauth2client
  pipelines:
    traces:
      receivers:
        - otlp
      exporters:
        - otlp/auth
--------------------------------------------------------------------------------
/apis/v1beta1/testdata/otelcol-k8sevents.yaml:
--------------------------------------------------------------------------------
# https://github.com/open-telemetry/opentelemetry-operator/issues/3133
receivers:
  otlp:
    protocols:
      http:
  k8s_events:
    namespaces: [chainsaw-k8seventsreceiver]
exporters:
  debug:
    verbosity: detailed
service:
  pipelines:
    logs:
      receivers: [k8s_events]
      exporters: [debug]
    traces:
      receivers: [otlp]
      exporters: [debug]
--------------------------------------------------------------------------------
/apis/v1beta1/testdata/otelcol-null-values.yaml:
--------------------------------------------------------------------------------
# Taken from https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/examples/demo/otel-collector-config.yaml
receivers:
  otlp:
    protocols:
      grpc:
      http:

exporters:
  otlp:
    endpoint:

processors:
  batch:

extensions:
  health_check:

connectors:
  spanmetrics:

service:
  pipelines:
    traces:
    metrics:
--------------------------------------------------------------------------------
/apis/v1beta1/testdata/otelcol-pipelines.yaml:
--------------------------------------------------------------------------------
receivers:
  otlp:
    protocols:
      grpc:
      http:
exporters:
  debug:
service:
  pipelines:
    traces:
      receivers:
        - otlp
      exporters:
        - debug
--------------------------------------------------------------------------------
/autoinstrumentation/apache-httpd/README.md:
--------------------------------------------------------------------------------
# How to build the Apache HTTPD auto-instrumentation Docker image

To build the image for Apache HTTPD auto-instrumentation, use the following commands:

```
export REPO_NAME=""
export IMAGE_NAME_PREFIX="autoinstrumentation-apache-httpd"
export IMAGE_VERSION=`cat version.txt`
export IMAGE_NAME=${REPO_NAME}/${IMAGE_NAME_PREFIX}:${IMAGE_VERSION}
docker build --build-arg version=${IMAGE_VERSION} . -t ${IMAGE_NAME} -t ${REPO_NAME}/${IMAGE_NAME_PREFIX}:latest
docker push ${IMAGE_NAME}
docker push ${REPO_NAME}/${IMAGE_NAME_PREFIX}:latest
```
--------------------------------------------------------------------------------
/autoinstrumentation/apache-httpd/version.txt:
--------------------------------------------------------------------------------
1.0.4
--------------------------------------------------------------------------------
/autoinstrumentation/dotnet/version.txt:
--------------------------------------------------------------------------------
1.11.0
--------------------------------------------------------------------------------
/autoinstrumentation/java/version.txt:
--------------------------------------------------------------------------------
2.16.0
--------------------------------------------------------------------------------
/autoinstrumentation/nodejs/.dockerignore:
--------------------------------------------------------------------------------
build
node_modules
--------------------------------------------------------------------------------
/autoinstrumentation/php/version.txt:
--------------------------------------------------------------------------------
1.1.0
--------------------------------------------------------------------------------
/bundle/community/manifests/opentelemetry-operator-controller-manager-metrics-service_v1_service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: opentelemetry-operator
    control-plane: controller-manager
  name: opentelemetry-operator-controller-manager-metrics-service
spec:
  ports:
  - name: https
    port: 8443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/name: opentelemetry-operator
    control-plane: controller-manager
status:
  loadBalancer: {}
--------------------------------------------------------------------------------
/bundle/community/manifests/opentelemetry-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: opentelemetry-operator
  name: opentelemetry-operator-metrics-reader
rules:
- nonResourceURLs:
  - /metrics
  verbs:
  - get
--------------------------------------------------------------------------------
/bundle/community/manifests/opentelemetry-operator-webhook-service_v1_service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: opentelemetry-operator
  name: opentelemetry-operator-webhook-service
spec:
  ports:
  - port: 443
    protocol: TCP
    targetPort: 9443
  selector:
    app.kubernetes.io/name: opentelemetry-operator
    control-plane: controller-manager
status:
  loadBalancer: {}
--------------------------------------------------------------------------------
/bundle/openshift/manifests/opentelemetry-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: opentelemetry-operator
  name: opentelemetry-operator-metrics-reader
rules:
- nonResourceURLs:
  - /metrics
  verbs:
  - get
--------------------------------------------------------------------------------
/bundle/openshift/manifests/opentelemetry-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: opentelemetry-operator-prometheus
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
--------------------------------------------------------------------------------
/bundle/openshift/manifests/opentelemetry-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: opentelemetry-operator-prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: opentelemetry-operator-prometheus
subjects:
- kind: ServiceAccount
  name: prometheus-k8s
  namespace: openshift-monitoring
--------------------------------------------------------------------------------
/bundle/openshift/manifests/opentelemetry-operator-webhook-service_v1_service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: opentelemetry-operator
  name: opentelemetry-operator-webhook-service
spec:
  ports:
  - port: 443
    protocol: TCP
    targetPort: 9443
  selector:
    app.kubernetes.io/name: opentelemetry-operator
    control-plane: controller-manager
status:
  loadBalancer: {}
--------------------------------------------------------------------------------
/cmd/gather/Dockerfile:
--------------------------------------------------------------------------------
FROM registry.access.redhat.com/ubi9-minimal:9.2

RUN INSTALL_PKGS=" \
      rsync \
      tar \
      " && \
    microdnf install -y $INSTALL_PKGS && \
    microdnf clean all
WORKDIR /

ARG TARGETARCH
COPY bin/must-gather_${TARGETARCH} /usr/bin/must-gather

USER 65532:65532

ENTRYPOINT ["/usr/bin/must-gather"]
--------------------------------------------------------------------------------
/cmd/operator-opamp-bridge/Dockerfile:
--------------------------------------------------------------------------------
# Get CA certificates from the Alpine package repo
FROM alpine:3.22 AS certificates

RUN apk --no-cache add ca-certificates

# Start a new stage from scratch
FROM scratch

ARG TARGETARCH

WORKDIR /root/

# Copy the certs
COPY --from=certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt

# Copy binary built on the host
COPY bin/opampbridge_${TARGETARCH} ./main

# "nonroot"
USER 65532:65532
ENTRYPOINT ["./main"]
--------------------------------------------------------------------------------
/cmd/operator-opamp-bridge/internal/agent/testdata/agent.yaml:
--------------------------------------------------------------------------------
endpoint: ws://127.0.0.1:4320/v1/opamp
capabilities:
  AcceptsRemoteConfig: true
  ReportsEffectiveConfig: true
  AcceptsPackages: false
  ReportsPackageStatuses: false
  ReportsOwnTraces: true
  ReportsOwnMetrics: true
  ReportsOwnLogs: true
  AcceptsOpAMPConnectionSettings: true
  AcceptsOtherConnectionSettings: true
  AcceptsRestartCommand: true
  ReportsHealth: true
  ReportsRemoteConfig: true
--------------------------------------------------------------------------------
/cmd/operator-opamp-bridge/internal/agent/testdata/agenthttpbasic.yaml:
--------------------------------------------------------------------------------
endpoint: http://127.0.0.1:4320/v1/opamp
capabilities:
  AcceptsRemoteConfig: true
  ReportsEffectiveConfig: true
  AcceptsPackages: false
  ReportsPackageStatuses: false
  ReportsOwnTraces: true
  ReportsOwnMetrics: true
  ReportsOwnLogs: true
  AcceptsOpAMPConnectionSettings: true
  AcceptsOtherConnectionSettings: true
  AcceptsRestartCommand: true
  ReportsHealth: true
  ReportsRemoteConfig: true
--------------------------------------------------------------------------------
/cmd/operator-opamp-bridge/internal/agent/testdata/agentnoprocessorsallowed.yaml:
--------------------------------------------------------------------------------
endpoint: ws://127.0.0.1:4320/v1/opamp
capabilities:
  AcceptsRemoteConfig: true
  ReportsEffectiveConfig: true
  AcceptsPackages: false
  ReportsPackageStatuses: false
  ReportsOwnTraces: true
  ReportsOwnMetrics: true
  ReportsOwnLogs: true
  AcceptsOpAMPConnectionSettings: true
  AcceptsOtherConnectionSettings: true
  AcceptsRestartCommand: true
  ReportsHealth: true
  ReportsRemoteConfig: true
componentsAllowed:
  receivers:
    - otlp
  exporters:
    - debug
--------------------------------------------------------------------------------
/cmd/operator-opamp-bridge/internal/config/headers.go:
--------------------------------------------------------------------------------
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package config

import "net/http"

// Headers is a plain string-to-string map of HTTP header names to values.
type Headers map[string]string

// ToHTTPHeader converts Headers into an http.Header, wrapping each single
// value in a one-element slice as required by the net/http representation.
func (h Headers) ToHTTPHeader() http.Header {
	newMap := make(map[string][]string)
	for key, value := range h {
		newMap[key] = []string{value}
	}
	return newMap
}
--------------------------------------------------------------------------------
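A minimal usage sketch for the `Headers.ToHTTPHeader` helper above (not part of the repository): the import path assumes the bridge's module layout, and since the package lives under `internal/` it is only importable from within this module; the header names and values are made up for illustration.

```go
package main

import (
	"fmt"

	// Assumed import path; internal/ packages are only visible inside this module.
	"github.com/open-telemetry/opentelemetry-operator/cmd/operator-opamp-bridge/internal/config"
)

func main() {
	// Illustrative header values, e.g. credentials for the bridge's OpAMP connection.
	h := config.Headers{
		"Authorization": "Basic dXNlcjpwYXNz",
		"X-Tenant":      "default",
	}

	// ToHTTPHeader wraps each value in a one-element slice, matching net/http,
	// so the usual http.Header accessors work on the result.
	header := h.ToHTTPHeader()
	fmt.Println(header.Get("Authorization")) // prints: Basic dXNlcjpwYXNz
}
```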
AcceptsRemoteConfig: true 4 | ReportsEffectiveConfig: true 5 | AcceptsPackages: 6 | false 7 | ReportsPackageStatuses: false 8 | Report 9 | sOwnTraces: true 10 | Re 11 | 12 | portsOwnMetrics: true 13 | ReportsOwnLogs: true 14 | AcceptsOpAMPConnectionSettings: true 15 | AcceptsOtherConnectionSettings: true 16 | AcceptsRestartCommand: true 17 | ReportsHealth: true 18 | ReportsRemoteConfig: true 19 | -------------------------------------------------------------------------------- /cmd/operator-opamp-bridge/internal/config/testdata/agenthttpbasic.yaml: -------------------------------------------------------------------------------- 1 | endpoint: http://127.0.0.1:4320/v1/opamp 2 | heartbeatInterval: 45s 3 | name: "http-test-bridge" 4 | capabilities: 5 | AcceptsRemoteConfig: true 6 | ReportsEffectiveConfig: true 7 | AcceptsPackages: false 8 | ReportsPackageStatuses: false 9 | ReportsOwnTraces: true 10 | ReportsOwnMetrics: true 11 | ReportsOwnLogs: true 12 | AcceptsOpAMPConnectionSettings: true 13 | AcceptsOtherConnectionSettings: true 14 | AcceptsRestartCommand: true 15 | ReportsHealth: true 16 | ReportsRemoteConfig: true 17 | -------------------------------------------------------------------------------- /cmd/operator-opamp-bridge/internal/config/testdata/agentwithdescription.yaml: -------------------------------------------------------------------------------- 1 | endpoint: ws://127.0.0.1:4320/v1/opamp 2 | description: 3 | non_identifying_attributes: 4 | custom.attribute: "custom-value" 5 | capabilities: 6 | AcceptsRemoteConfig: true 7 | ReportsEffectiveConfig: true 8 | AcceptsPackages: false 9 | ReportsPackageStatuses: false 10 | ReportsOwnTraces: true 11 | ReportsOwnMetrics: true 12 | ReportsOwnLogs: true 13 | AcceptsOpAMPConnectionSettings: true 14 | AcceptsOtherConnectionSettings: true 15 | AcceptsRestartCommand: true 16 | ReportsHealth: true 17 | ReportsRemoteConfig: true 18 | -------------------------------------------------------------------------------- /cmd/operator-opamp-bridge/internal/config/testdata/kubeconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - cluster: 5 | server: https://example.com 6 | name: test-cluster 7 | contexts: 8 | - context: 9 | cluster: test-cluster 10 | user: test-user 11 | name: test-context 12 | current-context: test-context 13 | users: 14 | - name: test-user 15 | user: 16 | token: dummy-token 17 | -------------------------------------------------------------------------------- /cmd/otel-allocator/Dockerfile: -------------------------------------------------------------------------------- 1 | # Get CA certificates from the Alpine package repo 2 | FROM alpine:3.22 AS certificates 3 | 4 | RUN apk --no-cache add ca-certificates 5 | 6 | # Start a new stage from scratch 7 | FROM scratch 8 | 9 | ARG TARGETARCH 10 | 11 | WORKDIR / 12 | 13 | # Copy the certs 14 | COPY --from=certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 15 | 16 | # Copy binary built on the host 17 | COPY bin/targetallocator_${TARGETARCH} ./main 18 | 19 | USER 65532:65532 20 | 21 | ENTRYPOINT ["./main"] 22 | -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/config/testdata/file_sd_test.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "labels": { 4 | "job": "node" 5 | }, 6 | "targets": [ 7 | "promfile.domain:1001" 8 | ] 9 | }, 10 | { 11 | "labels": { 12 | 
"foo1": "bar1" 13 | }, 14 | "targets": [ 15 | "promfile.domain:3000" 16 | ] 17 | } 18 | ] 19 | -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/config/testdata/no_config.yaml: -------------------------------------------------------------------------------- 1 | # this is some random data to check if we skip unknown fields instead of rejecting them 2 | some_key: some_value 3 | -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/config/testdata/pod_service_selector_camelcase_test.yaml: -------------------------------------------------------------------------------- 1 | collector_namespace: default 2 | collector_selector: 3 | matchLabels: 4 | app.kubernetes.io/instance: default.test 5 | app.kubernetes.io/managed-by: opentelemetry-operator 6 | prometheus_cr: 7 | pod_monitor_selector: 8 | matchLabels: 9 | release: test 10 | service_monitor_selector: 11 | matchLabels: 12 | release: test 13 | config: 14 | scrape_configs: 15 | - job_name: prometheus 16 | static_configs: 17 | - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"] 18 | labels: 19 | my: label -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/config/testdata/pod_service_selector_test.yaml: -------------------------------------------------------------------------------- 1 | collector_namespace: default 2 | collector_selector: 3 | matchlabels: 4 | app.kubernetes.io/instance: default.test 5 | app.kubernetes.io/managed-by: opentelemetry-operator 6 | prometheus_cr: 7 | pod_monitor_selector: 8 | matchlabels: 9 | release: test 10 | service_monitor_selector: 11 | matchlabels: 12 | release: test 13 | config: 14 | scrape_configs: 15 | - job_name: prometheus 16 | static_configs: 17 | - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"] 18 | labels: 19 | my: label -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/server/testdata/prom-no-config.yaml: -------------------------------------------------------------------------------- 1 | collector_selector: 2 | matchlabels: 3 | app.kubernetes.io/instance: default.test 4 | app.kubernetes.io/managed-by: opentelemetry-operator 5 | prometheus_cr: 6 | scrape_interval: 60s 7 | config: 8 | scrape_configs: 9 | -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/target/testdata/test.yaml: -------------------------------------------------------------------------------- 1 | collector_selector: 2 | matchlabels: 3 | app.kubernetes.io/instance: default.test 4 | app.kubernetes.io/managed-by: opentelemetry-operator 5 | config: 6 | scrape_configs: 7 | - job_name: prometheus 8 | 9 | file_sd_configs: 10 | - files: 11 | - ../config/testdata/file_sd_test.json 12 | static_configs: 13 | - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"] 14 | labels: 15 | my: label 16 | - job_name: prometheus2 17 | static_configs: 18 | - targets: ["prom.domain:8001"] 19 | -------------------------------------------------------------------------------- /cmd/otel-allocator/internal/target/testdata/test_update.yaml: -------------------------------------------------------------------------------- 1 | collector_selector: 2 | matchlabels: 3 | app.kubernetes.io/instance: default.test 4 | app.kubernetes.io/managed-by: opentelemetry-operator 5 | config: 6 | scrape_configs: 7 | - job_name: prometheus 8 | 
9 | file_sd_configs: 10 | - files: 11 | - ../config/testdata/file_sd_test.json 12 | static_configs: 13 | - targets: ["prom.domain:9004", "prom.domain:9005"] 14 | labels: 15 | my: other-label 16 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | apiVersion: kustomize.config.k8s.io/v1beta1 7 | kind: Kustomization 8 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhookClientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | group: apiextensions.k8s.io 13 | path: spec/conversion/webhookClientConfig/service/namespace 14 | create: false 15 | 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_opampbridges.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: opampbridges.opentelemetry.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_opentelemetrycollectors.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: opentelemetrycollectors.opentelemetry.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_targetallocators.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: targetallocators.opentelemetry.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_opentelemetrycollectors.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: opentelemetrycollectors.opentelemetry.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: opentelemetry-operator-webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1alpha1 17 | - v1beta1 18 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /config/manifests/community/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../default 3 | - ../../samples 4 | - ../../scorecard 5 | apiVersion: kustomize.config.k8s.io/v1beta1 6 | kind: Kustomization 7 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../default 3 | - ../samples 4 | - ../scorecard 5 | apiVersion: kustomize.config.k8s.io/v1beta1 6 | kind: Kustomization 7 | -------------------------------------------------------------------------------- /config/manifests/openshift/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../overlays/openshift 3 | - ../../samples 4 | - ../../scorecard 5 | apiVersion: kustomize.config.k8s.io/v1beta1 6 | kind: Kustomization 7 | -------------------------------------------------------------------------------- /config/overlays/openshift/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../default 3 | 4 | patches: 5 | - path: manager-patch.yaml 6 | target: 7 | group: apps 8 | kind: Deployment 9 | name: controller-manager 10 | version: v1 11 | - path: metrics_service_tls_patch.yaml 12 | - path: manager_auth_proxy_tls_patch.yaml 13 | 14 | apiVersion: kustomize.config.k8s.io/v1beta1 15 | kind: Kustomization 16 | -------------------------------------------------------------------------------- /config/overlays/openshift/manager-patch.yaml: -------------------------------------------------------------------------------- 
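# A sketch of how the patch below is applied, assuming the kustomization.yaml shown above:
# it is a JSON 6902 patch targeting the controller-manager Deployment (group apps, v1).
# Note that an RFC 6902 "add" op replaces the target member when it already exists, so if
# the base manager.yaml (not shown here) already sets /spec/template/spec/containers/0/args,
# this list replaces those args wholesale rather than appending to them.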
1 | - op: add 2 | path: "/spec/template/spec/containers/0/args" 3 | value: 4 | - --metrics-addr=127.0.0.1:8080 5 | - --enable-leader-election 6 | - --zap-log-level=info 7 | - --zap-time-encoding=rfc3339nano 8 | - --enable-nginx-instrumentation=true 9 | - '--enable-go-instrumentation=true' 10 | - '--openshift-create-dashboard=true' 11 | - '--feature-gates=+operator.observability.prometheus' 12 | - '--enable-cr-metrics=true' 13 | - '--create-sm-operator-metrics=true' 14 | -------------------------------------------------------------------------------- /config/overlays/openshift/metrics_service_tls_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.openshift.io/serving-cert-secret-name: opentelemetry-operator-metrics 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | -------------------------------------------------------------------------------- /config/rbac/_opampbridge_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit opampbridges. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: opampbridge-editor-role 6 | rules: 7 | - apiGroups: 8 | - opentelemetry.io 9 | resources: 10 | - opampbridges 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - opentelemetry.io 21 | resources: 22 | - opampbridges/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/_opampbridge_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view opampbridges. 
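# Unlike the editor role above, this role grants only read verbs (get/list/watch) on
# opampbridges, plus read access to the status subresource. It could be bound to a user
# with, for example (illustrative binding name and user):
#   kubectl create clusterrolebinding opampbridge-viewer --clusterrole=opampbridge-viewer-role --user=jane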
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: opampbridge-viewer-role 6 | rules: 7 | - apiGroups: 8 | - opentelemetry.io 9 | resources: 10 | - opampbridges 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - opentelemetry.io 17 | resources: 18 | - opampbridges/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: opentelemetry-operator 6 | control-plane: controller-manager 7 | name: controller-manager-metrics-service 8 | namespace: system 9 | spec: 10 | ports: 11 | - name: https 12 | port: 8443 13 | targetPort: https 14 | protocol: TCP 15 | selector: 16 | app.kubernetes.io/name: opentelemetry-operator 17 | control-plane: controller-manager 18 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Comment the following 4 lines if you want to disable 2 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 3 | # which protects your /metrics endpoint. 4 | resources: 5 | - service_account.yaml 6 | - role.yaml 7 | - role_binding.yaml 8 | - leader_election_role.yaml 9 | - leader_election_role_binding.yaml 10 | - auth_proxy_service.yaml 11 | - auth_proxy_role.yaml 12 | - auth_proxy_role_binding.yaml 13 | - auth_proxy_client_clusterrole.yaml 14 | apiVersion: kustomize.config.k8s.io/v1beta1 15 | kind: Kustomization 16 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
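# Background (hedged): leader election works by having each manager replica compete to own
# a shared lock object; this Role grants the ConfigMap and Event permissions a
# ConfigMap-based lock needs. Newer controller-runtime versions typically use Lease objects
# (coordination.k8s.io) instead, which would require an equivalent rule for leases.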
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | - patch 34 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/opentelemetrycollector_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit opentelemetrycollectors. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: opentelemetrycollector-editor-role 6 | rules: 7 | - apiGroups: 8 | - opentelemetry.io 9 | resources: 10 | - opentelemetrycollectors 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - opentelemetry.io 21 | resources: 22 | - opentelemetrycollectors/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/opentelemetrycollector_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view opentelemetrycollectors. 
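# The opentelemetrycollectors/status rule below is listed separately because Kubernetes
# RBAC treats subresources as distinct resource paths: granting verbs on
# opentelemetrycollectors does not implicitly grant access to its status subresource.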
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: opentelemetrycollector-viewer-role 6 | rules: 7 | - apiGroups: 8 | - opentelemetry.io 9 | resources: 10 | - opentelemetrycollectors 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - opentelemetry.io 17 | resources: 18 | - opentelemetrycollectors/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system -------------------------------------------------------------------------------- /config/samples/_v1alpha1_targetallocator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: TargetAllocator 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: targetallocator 6 | app.kubernetes.io/instance: targetallocator-sample 7 | app.kubernetes.io/part-of: opentelemetry-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: opentelemetry-operator 10 | name: targetallocator-sample 11 | spec: 12 | # TODO(user): Add fields here 13 | -------------------------------------------------------------------------------- /config/samples/core_v1alpha1_opentelemetrycollector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: otel 5 | spec: 6 | config: | 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | exporters: 14 | debug: {} 15 | 16 | service: 17 | pipelines: 18 | traces: 19 | receivers: [otlp] 20 | exporters: [debug] 21 | -------------------------------------------------------------------------------- /config/samples/core_v1beta1_opentelemetrycollector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: otel 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | exporters: 14 | debug: {} 15 | 16 | service: 17 | pipelines: 18 | traces: 19 | receivers: [otlp] 20 | exporters: [debug] 21 | -------------------------------------------------------------------------------- /config/samples/instrumentation_v1alpha1_instrumentation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: Instrumentation 3 | metadata: 4 | name: instrumentation 5 | spec: 6 | exporter: 7 | endpoint: http://otel-collector-headless:4317 8 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file as resources ## 2 | resources: 3 | - core_v1alpha1_opentelemetrycollector.yaml 4 | - core_v1beta1_opentelemetrycollector.yaml 5 | - instrumentation_v1alpha1_instrumentation.yaml 6 | - _v1alpha1_opampbridge.yaml 7 | #+kubebuilder:scaffold:manifestskustomizesamples 8 | apiVersion: kustomize.config.k8s.io/v1beta1 9 | kind: Kustomization 10 | -------------------------------------------------------------------------------- /config/samples/sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: opentelemetrycollector-as-sidecar 5 | spec: 6 | mode: sidecar 7 | config: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: {} 12 | http: {} 13 | 14 | exporters: 15 | debug: {} 16 | 17 | service: 18 | pipelines: 19 | traces: 20 | receivers: [otlp] 21 | exporters: [debug] 22 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | patches: 6 | - path: patches/basic.config.yaml 7 | target: 8 | group: scorecard.operatorframework.io 9 | kind: Configuration 10 | name: config 11 | version: v1alpha3 12 | - path: patches/olm.config.yaml 13 | target: 14 | group: scorecard.operatorframework.io 15 | kind: Configuration 16 | name: config 17 | version: v1alpha3 18 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.27.0 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | apiVersion: kustomize.config.k8s.io/v1beta1 8 | kind: Kustomization 9 | -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | protocol: TCP 12 | selector: 13 | app.kubernetes.io/name: opentelemetry-operator 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /docs/api/README.md: -------------------------------------------------------------------------------- 1 | 
# API reference for CRDs 2 | 3 | - [Instrumentation](instrumentations.md) 4 | - [OpAMPBridge](opampbridges.md) 5 | - [OpenTelemetryCollector](opentelemetrycollectors.md) 6 | - [TargetAllocator](targetallocators.md) 7 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | -------------------------------------------------------------------------------- /hack/ignore-createdAt-bundle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Since operator-sdk 1.26.0, `make bundle` changes the `createdAt` field in the bundle 3 | # even if it is patched: 4 | # https://github.com/operator-framework/operator-sdk/pull/6136 5 | # This checks whether `createdAt` is the only change in the bundle. If so, the change is 6 | # reverted. Otherwise, nothing happens. 7 | # https://github.com/operator-framework/operator-sdk/issues/6285#issuecomment-1415350333 8 | git diff --quiet -I'^ createdAt: ' bundle 9 | if ((! $?)) ; then 10 | git checkout bundle 11 | fi 12 | -------------------------------------------------------------------------------- /hack/install-prometheus-operator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$(kubectl api-resources --api-group=monitoring.coreos.com -o name)" ]]; then 4 | echo "Prometheus CRDs are there" 5 | else 6 | kubectl create -f https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.66.0/bundle.yaml 7 | fi 8 | -------------------------------------------------------------------------------- /internal/autodetect/collector/operator.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package collector 5 | 6 | // Availability represents whether the OpenTelemetryCollector CR is available in the cluster. 7 | type Availability int 8 | 9 | const ( 10 | // NotAvailable OpenTelemetryCollector CR is not available in the cluster. 11 | NotAvailable Availability = iota 12 | 13 | // Available OpenTelemetryCollector CR is available in the cluster. 14 | Available 15 | ) 16 | 17 | func (p Availability) String() string { 18 | return [...]string{"NotAvailable", "Available"}[p] 19 | } 20 | -------------------------------------------------------------------------------- /internal/autodetect/opampbridge/operator.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package opampbridge 5 | 6 | // Availability represents whether the OpAmpBridge CR is available in the cluster. 7 | type Availability int 8 | 9 | const ( 10 | // NotAvailable OpAmpBridge CR is not available in the cluster. 11 | NotAvailable Availability = iota 12 | 13 | // Available OpAmpBridge CR is available in the cluster.
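// Note: each autodetect package repeats this enum pattern. The String method below
// indexes a fixed string array by the constant's value, so the array order must match
// the iota declaration order (NotAvailable=0, Available=1); adding a constant without
// extending the array would panic at runtime with an index out of range.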
14 | Available 15 | ) 16 | 17 | func (p Availability) String() string { 18 | return [...]string{"NotAvailable", "Available"}[p] 19 | } 20 | -------------------------------------------------------------------------------- /internal/autodetect/openshift/routes.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package openshift 5 | 6 | // RoutesAvailability holds the auto-detected availability of the OpenShift Routes API. 7 | type RoutesAvailability int 8 | 9 | const ( 10 | // RoutesAvailable represents that the route.openshift.io API is available. 11 | RoutesAvailable RoutesAvailability = iota 12 | 13 | // RoutesNotAvailable represents that the route.openshift.io API is not available. 14 | RoutesNotAvailable 15 | ) 16 | 17 | func (p RoutesAvailability) String() string { 18 | return [...]string{"Available", "NotAvailable"}[p] 19 | } 20 | -------------------------------------------------------------------------------- /internal/autodetect/prometheus/operator.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package prometheus 5 | 6 | // Availability represents whether the prometheus operator CRDs are available. 7 | type Availability int 8 | 9 | const ( 10 | // NotAvailable represents that the monitoring.coreos.com API is not available. 11 | NotAvailable Availability = iota 12 | 13 | // Available represents that the monitoring.coreos.com API is available. 14 | Available 15 | ) 16 | 17 | func (p Availability) String() string { 18 | return [...]string{"NotAvailable", "Available"}[p] 19 | } 20 | -------------------------------------------------------------------------------- /internal/autodetect/rbac/operator.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package rbac 5 | 6 | // Availability represents whether the operator service account has permissions to create RBAC resources. 7 | type Availability int 8 | 9 | const ( 10 | // NotAvailable RBAC permissions are not available. 11 | NotAvailable Availability = iota 12 | 13 | // Available RBAC permissions are available. 14 | Available 15 | ) 16 | 17 | func (p Availability) String() string { 18 | return [...]string{"NotAvailable", "Available"}[p] 19 | } 20 | -------------------------------------------------------------------------------- /internal/autodetect/targetallocator/operator.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package targetallocator 5 | 6 | // Availability represents whether the TargetAllocator CR is available in the cluster. 7 | type Availability int 8 | 9 | const ( 10 | // NotAvailable TargetAllocator CR is not available in the cluster. 11 | NotAvailable Availability = iota 12 | 13 | // Available TargetAllocator CR is available in the cluster.
14 | Available 15 | ) 16 | 17 | func (p Availability) String() string { 18 | return [...]string{"NotAvailable", "Available"}[p] 19 | } 20 | -------------------------------------------------------------------------------- /internal/controllers/testdata/ingress_testdata.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | receivers: 3 | otlp: 4 | protocols: 5 | grpc: 6 | endpoint: 0.0.0.0:12345 7 | otlp/test: 8 | protocols: 9 | grpc: 10 | endpoint: 0.0.0.0:12346 11 | exporters: 12 | debug: 13 | service: 14 | pipelines: 15 | traces: 16 | receivers: [otlp, otlp/test] 17 | exporters: [debug] 18 | -------------------------------------------------------------------------------- /internal/controllers/testdata/otlp_test.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | http: 6 | processors: 7 | exporters: 8 | otlp: 9 | endpoint: jaeger-allinone-collector-headless.chainsaw-otlp-metrics.svc:4317 10 | tls: 11 | insecure: true 12 | prometheus: 13 | endpoint: 0.0.0.0:8889 14 | resource_to_telemetry_conversion: 15 | enabled: true # by default resource attributes are dropped 16 | service: 17 | pipelines: 18 | traces: 19 | receivers: [otlp] 20 | exporters: [otlp] 21 | metrics: 22 | receivers: [otlp] 23 | exporters: [prometheus] 24 | -------------------------------------------------------------------------------- /internal/controllers/testdata/test.yaml: -------------------------------------------------------------------------------- 1 | processors: 2 | receivers: 3 | jaeger: 4 | protocols: 5 | grpc: 6 | prometheus: 7 | config: 8 | scrape_configs: 9 | - job_name: otel-collector 10 | scrape_interval: 10s 11 | static_configs: 12 | - targets: [ '0.0.0.0:8888', '0.0.0.0:9999' ] 13 | 14 | exporters: 15 | debug: 16 | 17 | service: 18 | pipelines: 19 | metrics: 20 | receivers: [prometheus, jaeger] 21 | exporters: [debug] -------------------------------------------------------------------------------- /internal/controllers/testdata/test_ta_update.yaml: -------------------------------------------------------------------------------- 1 | processors: 2 | receivers: 3 | jaeger: 4 | protocols: 5 | grpc: 6 | prometheus: 7 | config: 8 | scrape_configs: 9 | - job_name: otel-collector 10 | scrape_interval: 10s 11 | static_configs: 12 | - targets: [ '0.0.0.0:8888', '0.0.0.0:9999', '0.0.0.0:10100' ] 13 | 14 | exporters: 15 | debug: 16 | 17 | service: 18 | pipelines: 19 | metrics: 20 | receivers: [prometheus, jaeger] 21 | exporters: [debug] -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/config_expected_targetallocator.yaml: -------------------------------------------------------------------------------- 1 | exporters: 2 | debug: 3 | receivers: 4 | prometheus: 5 | config: 6 | global: 7 | evaluation_interval: 1m 8 | scrape_interval: 1m 9 | scrape_timeout: 10s 10 | target_allocator: 11 | collector_id: ${POD_NAME} 12 | endpoint: http://test-targetallocator:80 13 | interval: 30s 14 | service: 15 | pipelines: 16 | metrics: 17 | exporters: 18 | - debug 19 | receivers: 20 | - prometheus 21 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/http_sd_config_servicemonitor_test.yaml: -------------------------------------------------------------------------------- 1 | processors: 2 | receivers: 3 | prometheus: 4 | config: 5 | scrape_configs: 6 | - job_name: 
serviceMonitor/test/test/0 7 | 8 | static_configs: 9 | - targets: ["prom.domain:1001", "prom.domain:1002", "prom.domain:1003"] 10 | labels: 11 | my: label 12 | 13 | file_sd_configs: 14 | - files: 15 | - file2.json 16 | 17 | exporters: 18 | debug: 19 | 20 | service: 21 | pipelines: 22 | metrics: 23 | receivers: [prometheus] 24 | exporters: [debug] 25 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/http_sd_config_ta_test.yaml: -------------------------------------------------------------------------------- 1 | processors: 2 | receivers: 3 | prometheus: 4 | config: 5 | scrape_configs: 6 | - job_name: prometheus 7 | 8 | static_configs: 9 | - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"] 10 | labels: 11 | my: label 12 | target_allocator: 13 | collector_id: ${POD_NAME} 14 | endpoint: http://test-sd-targetallocator:80 15 | interval: 60s 16 | 17 | exporters: 18 | debug: 19 | 20 | service: 21 | pipelines: 22 | metrics: 23 | receivers: [prometheus] 24 | exporters: [debug] 25 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/ingress_testdata.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | receivers: 3 | otlp: 4 | protocols: 5 | grpc: 6 | endpoint: 0.0.0.0:12345 7 | otlp/test: 8 | protocols: 9 | grpc: 10 | endpoint: 0.0.0.0:98765 11 | exporters: 12 | debug: 13 | verbosity: detailed 14 | 15 | service: 16 | pipelines: 17 | traces: 18 | receivers: [otlp, otlp/test] 19 | exporters: [nop] 20 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/prometheus-exporter.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | http: 6 | 7 | exporters: 8 | prometheus/prod: 9 | endpoint: 0.0.0.0:8884 10 | 11 | prometheus/dev: 12 | endpoint: 0.0.0.0:8885 13 | 14 | service: 15 | pipelines: 16 | metrics: 17 | receivers: [otlp] 18 | exporters: [prometheus/dev, prometheus/prod] 19 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/rbac_resourcedetectionprocessor_k8s.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | processors: 6 | resourcedetection: 7 | detectors: [k8snode] 8 | exporters: 9 | otlp: 10 | endpoint: "otlp:4317" 11 | service: 12 | pipelines: 13 | traces: 14 | receivers: [otlp] 15 | processors: [resourcedetection] 16 | exporters: [otlp] 17 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/rbac_resourcedetectionprocessor_openshift.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | processors: 6 | resourcedetection: 7 | detectors: [openshift] 8 | exporters: 9 | otlp: 10 | endpoint: "otlp:4317" 11 | service: 12 | pipelines: 13 | traces: 14 | receivers: [otlp] 15 | processors: [resourcedetection] 16 | exporters: [otlp] 17 | -------------------------------------------------------------------------------- /internal/manifests/collector/testdata/test.yaml: -------------------------------------------------------------------------------- 1 | processors: 2 | receivers: 3 | jaeger: 4 | protocols: 5 | grpc: 6 | prometheus: 7 | config: 8 
| scrape_configs: 9 | - job_name: otel-collector 10 | scrape_interval: 10s 11 | static_configs: 12 | - targets: [ '0.0.0.0:8888', '0.0.0.0:9999' ] 13 | 14 | exporters: 15 | debug: 16 | 17 | service: 18 | pipelines: 19 | metrics: 20 | receivers: [prometheus, jaeger] 21 | exporters: [debug] -------------------------------------------------------------------------------- /internal/manifests/targetallocator/testdata/test.yaml: -------------------------------------------------------------------------------- 1 | processors: 2 | receivers: 3 | jaeger: 4 | protocols: 5 | grpc: 6 | prometheus: 7 | config: 8 | scrape_configs: 9 | - job_name: otel-collector 10 | scrape_interval: 10s 11 | static_configs: 12 | - targets: [ '0.0.0.0:8888', '0.0.0.0:9999' ] 13 | 14 | exporters: 15 | debug: 16 | 17 | service: 18 | pipelines: 19 | metrics: 20 | receivers: [prometheus, jaeger] 21 | exporters: [debug] 22 | -------------------------------------------------------------------------------- /kind-1.23.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP -------------------------------------------------------------------------------- /kind-1.24.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.25.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.26.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | 
kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.27.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.28.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.29.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.29.0@sha256:eaa1450915475849a73a9227b8f201df25e55e268e5d619312131292e324d570 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.30.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.30.2@sha256:ecfe5841b9bee4fe9690f49c118c33629fa345e3350a0c67a5a34482a99d6bba 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.31.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.31.0@sha256:25a3504b2b340954595fa7a6ed1575ef2edadf5abd83c0776a4308b64bf47c93 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | 
nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.32.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.32.0@sha256:c48c62eac5da28cdadcf560d1d8616cfa6783b58f0d94cf63ad1bf49600cb027 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /kind-1.33.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: dual 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.33.1@sha256:050072256b9a903bd914c0b2866828150cb229cea0efe5892e2b644d5dd3b34f 8 | kubeadmConfigPatches: 9 | - | 10 | kind: InitConfiguration 11 | nodeRegistration: 12 | kubeletExtraArgs: 13 | node-labels: "ingress-ready=true" 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /pkg/collector/upgrade/noop.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package upgrade 5 | 6 | import ( 7 | "sigs.k8s.io/controller-runtime/pkg/client" 8 | 9 | "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" 10 | ) 11 | 12 | // nolint unused 13 | func noop(cl client.Client, otelcol *v1alpha1.OpenTelemetryCollector) (*v1alpha1.OpenTelemetryCollector, error) { 14 | return otelcol, nil 15 | } 16 | -------------------------------------------------------------------------------- /pkg/collector/upgrade/testdata/v0_61_0-invalid.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | receivers: 3 | jaeger: 4 | protocols: 5 | grpc: 6 | remote_sampling: 7 | strategy_file: "/etc/strategy.json" 8 | strategy_file_reload_interval: 10s 9 | exporters: 10 | debug: {} 11 | service: 12 | pipelines: 13 | traces: 14 | receivers: ["jaeger"] 15 | exporters: ["debug"] 16 | -------------------------------------------------------------------------------- /pkg/collector/upgrade/testdata/v0_61_0-valid.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | receivers: 3 | jaeger: 4 | protocols: 5 | grpc: 6 | exporters: 7 | debug: {} 8 | service: 9 | pipelines: 10 | traces: 11 | receivers: ["jaeger"] 12 | exporters: ["debug"] 13 | -------------------------------------------------------------------------------- /pkg/collector/upgrade/v0_110_0.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package upgrade 5 | 6 | import ( 7 | 
"github.com/open-telemetry/opentelemetry-operator/apis/v1beta1" 8 | ) 9 | 10 | func upgrade0_110_0(_ VersionUpgrade, otelcol *v1beta1.OpenTelemetryCollector) (*v1beta1.OpenTelemetryCollector, error) { //nolint:unparam 11 | envVarExpansionFeatureFlag := "-component.UseLocalHostAsDefaultHost" 12 | otelcol.Spec.OpenTelemetryCommonFields.Args = RemoveFeatureGate(otelcol.Spec.OpenTelemetryCommonFields.Args, envVarExpansionFeatureFlag) 13 | return otelcol, nil 14 | } 15 | -------------------------------------------------------------------------------- /pkg/collector/upgrade/v0_2_10.go: -------------------------------------------------------------------------------- 1 | // Copyright The OpenTelemetry Authors 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package upgrade 5 | 6 | import ( 7 | "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1" 8 | ) 9 | 10 | // this is our first version under otel/opentelemetry-collector. 11 | func upgrade0_2_10(u VersionUpgrade, otelcol *v1alpha1.OpenTelemetryCollector) (*v1alpha1.OpenTelemetryCollector, error) { 12 | // this is a no-op, but serves to keep the skeleton here for the future versions 13 | return otelcol, nil 14 | } 15 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/clusterresourcequotas.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - quota.openshift.io 6 | resources: 7 | - clusterresourcequotas 8 | verbs: 9 | - get 10 | - list 11 | - watch -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/cronjobs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - batch 7 | resources: 8 | - cronjobs 9 | verbs: 10 | - get 11 | - list 12 | - watch 13 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/daemonsets.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - extensions 6 | resources: 7 | - daemonsets 8 | verbs: 9 | - get 10 | - list 11 | - watch 12 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/events.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - "" 6 | resources: 7 | - events 8 | verbs: 9 | - get 10 | - list 11 | - watch 12 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/extensions.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - extensions 7 | resources: 8 | - deployments 9 | - replicasets 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/namespaces-status.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - "" 6 | resources: 7 | - namespaces/status 8 | verbs: 9 | - get 
10 | - list 11 | - watch 12 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/namespaces.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - "" 6 | resources: 7 | - namespaces 8 | verbs: 9 | - create 10 | - delete 11 | - get 12 | - list 13 | - patch 14 | - update 15 | - watch 16 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/nodes-proxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - "" 7 | resources: 8 | - nodes/stats 9 | - nodes/proxy 10 | verbs: 11 | - get 12 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/nodes-spec.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - "" 7 | resources: 8 | - nodes/spec 9 | verbs: 10 | - get 11 | - list 12 | - watch 13 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/nodes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - "" 7 | resources: 8 | - nodes 9 | verbs: 10 | - get 11 | - list 12 | - watch 13 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/pod-status.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - "" 7 | resources: 8 | - pods/status 9 | verbs: 10 | - get 11 | - list 12 | - watch 13 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/rbac.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - rbac.authorization.k8s.io 6 | resources: 7 | - clusterrolebindings 8 | - clusterroles 9 | verbs: 10 | - create 11 | - delete 12 | - get 13 | - list 14 | - patch 15 | - update 16 | - watch 17 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/replicaset.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - apps 6 | resources: 7 | - replicasets 8 | verbs: 9 | - get 10 | - list 11 | - watch 12 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/replicationcontrollers.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - "" 6 | resources: 7 | - replicationcontrollers 8 | - replicationcontrollers/status 9 | verbs: 10 | - get 11 | - list 12 | - watch 13 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/extra-permissions-operator/resourcequotas.yaml: 
-------------------------------------------------------------------------------- 1 | - op: add 2 | path: /rules/- 3 | value: 4 | apiGroups: 5 | - "" 6 | resources: 7 | - resourcequotas 8 | verbs: 9 | - get 10 | - list 11 | - watch 12 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/processor-k8sattributes/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-k8sattributes 5 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/processor-k8sattributes/01-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-k8sattributes 6 | spec: 7 | config: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: 12 | http: 13 | processors: 14 | k8sattributes: 15 | exporters: 16 | debug: 17 | service: 18 | pipelines: 19 | traces: 20 | receivers: [otlp] 21 | processors: [k8sattributes] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/processor-resourcedetection/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-resourcedetection -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8scluster/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-k8s-cluster 5 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8scluster/01-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-k8s-cluster 6 | spec: 7 | config: | 8 | receivers: 9 | k8s_cluster: 10 | processors: 11 | exporters: 12 | debug: 13 | service: 14 | pipelines: 15 | traces: 16 | receivers: [k8s_cluster] 17 | processors: [] 18 | exporters: [debug] 19 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8scluster/02-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-k8s-cluster 6 | spec: 7 | config: | 8 | receivers: 9 | k8s_cluster: 10 | distribution: openshift 11 | processors: 12 | exporters: 13 | debug: 14 | service: 15 | pipelines: 16 | traces: 17 | receivers: [k8s_cluster] 18 | processors: [] 19 | exporters: [debug] 20 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8sevents/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-k8s-events 5 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8sevents/01-install.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-k8s-events 6 | spec: 7 | config: | 8 | receivers: 9 | k8s_events: 10 | processors: 11 | exporters: 12 | debug: 13 | service: 14 | pipelines: 15 | traces: 16 | receivers: [k8s_events] 17 | processors: [] 18 | exporters: [debug] 19 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8sevents/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: receiver-k8sevents 7 | spec: 8 | steps: 9 | - name: create-namespace 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - name: default-config 14 | try: 15 | - apply: 16 | file: 01-install.yaml 17 | - assert: 18 | file: 01-assert.yaml 19 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8sobjects/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-k8sobjects 5 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8sobjects/01-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-k8sobjects 6 | spec: 7 | config: 8 | receivers: 9 | k8sobjects: 10 | auth_type: serviceAccount 11 | objects: 12 | - name: pods 13 | mode: pull 14 | processors: 15 | exporters: 16 | debug: 17 | service: 18 | pipelines: 19 | traces: 20 | receivers: [k8sobjects] 21 | processors: [] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-k8sobjects/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: receiver-k8sobjects 7 | spec: 8 | steps: 9 | - name: create-namespace 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - name: pod-pull-config 14 | try: 15 | - apply: 16 | file: 01-install.yaml 17 | - assert: 18 | file: 01-assert.yaml 19 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-kubeletstats/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-kubeletstats -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-kubeletstats/01-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-kubeletstats 6 | spec: 
7 | config: | 8 | receivers: 9 | kubeletstats: 10 | auth_type: "" 11 | processors: 12 | exporters: 13 | debug: 14 | service: 15 | pipelines: 16 | traces: 17 | receivers: [kubeletstats] 18 | processors: [] 19 | exporters: [debug] 20 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-kubeletstats/02-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-kubeletstats 6 | spec: 7 | config: | 8 | receivers: 9 | kubeletstats: 10 | extra_metadata_labels: 11 | - container.id 12 | processors: 13 | exporters: 14 | debug: 15 | service: 16 | pipelines: 17 | traces: 18 | receivers: [kubeletstats] 19 | processors: [] 20 | exporters: [debug] 21 | -------------------------------------------------------------------------------- /tests/e2e-automatic-rbac/receiver-kubeletstats/03-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: chainsaw-kubeletstats 6 | spec: 7 | config: | 8 | receivers: 9 | kubeletstats: 10 | extra_metadata_labels: 11 | - container.id 12 | processors: 13 | exporters: 14 | debug: 15 | service: 16 | pipelines: 17 | traces: 18 | receivers: [kubeletstats] 19 | processors: [] 20 | exporters: [debug] 21 | -------------------------------------------------------------------------------- /tests/e2e-autoscale/autoscale/02-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest-set-utilization 5 | status: 6 | scale: 7 | replicas: 2 8 | -------------------------------------------------------------------------------- /tests/e2e-autoscale/autoscale/03-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest-set-utilization 5 | status: 6 | scale: 7 | replicas: 1 8 | -------------------------------------------------------------------------------- /tests/e2e-crd-validations/sidecar/00-install-priority-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar-priorityclass 5 | spec: 6 | mode: sidecar 7 | priorityClassName: "priority" 8 | config: 9 | receivers: 10 | otlp: 11 | protocols: 12 | grpc: {} 13 | http: {} 14 | 15 | exporters: 16 | debug: {} 17 | 18 | service: 19 | pipelines: 20 | traces: 21 | receivers: [otlp] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-crd-validations/sidecar/01-install-tolerations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar-tolerations 5 | spec: 6 | mode: sidecar 7 | tolerations: 8 | - key: "key1" 9 | operator: "Equal" 10 | value: "value1" 11 | effect: "NoSchedule" 12 | config: 13 | receivers: 14 | otlp: 15 | protocols: 16 | grpc: {} 17 | http: {} 18 | 19 | exporters: 20 | debug: {} 21 | 22 | service: 23 | pipelines: 24 | traces: 25 | 
receivers: [otlp] 26 | exporters: [debug] 27 | -------------------------------------------------------------------------------- /tests/e2e-crd-validations/sidecar/03-install-additional-containers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar-additional-containers 5 | spec: 6 | mode: sidecar 7 | additionalContainers: 8 | - name: some 9 | config: 10 | receivers: 11 | otlp: 12 | protocols: 13 | grpc: {} 14 | http: {} 15 | 16 | exporters: 17 | debug: {} 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-apache-httpd/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | mode: sidecar 25 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-apache-multicontainer/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | mode: sidecar 25 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-dotnet-multicontainer/00-install-instrumentation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: Instrumentation 3 | metadata: 4 | name: dotnet 5 | spec: 6 | env: 7 | - name: OTEL_TRACES_SAMPLER 8 | value: always_on 9 | exporter: 10 | endpoint: http://localhost:4318 11 | propagators: 12 | - b3multi 13 | dotnet: 14 | env: 15 | # Make the test faster by exporting metrics sooner 16 | - name: OTEL_METRIC_EXPORT_INTERVAL 17 | value: "30000" 18 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-dotnet-musl/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-dotnet-musl/00-install-instrumentation.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: Instrumentation 3 | metadata: 4 | name: dotnet 5 | spec: 6 | env: 7 | - name: OTEL_TRACES_SAMPLER 8 | value: always_on 9 | exporter: 10 | endpoint: http://localhost:4318 11 | propagators: 12 | - b3multi 13 | dotnet: 14 | env: 15 | # Make the test faster by exporting metrics sooner 16 | - name: OTEL_METRIC_EXPORT_INTERVAL 17 | value: "30000" 18 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-dotnet/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-dotnet/00-install-instrumentation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: Instrumentation 3 | metadata: 4 | name: dotnet 5 | spec: 6 | env: 7 | - name: OTEL_TRACES_SAMPLER 8 | value: always_on 9 | exporter: 10 | endpoint: http://localhost:4318 11 | propagators: 12 | - b3multi 13 | dotnet: 14 | env: 15 | # Make the test faster by exporting metrics sooner 16 | - name: OTEL_METRIC_EXPORT_INTERVAL 17 | value: "30000" 18 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-go/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | mode: sidecar 7 | config: 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: {} 12 | http: {} 13 | 14 | processors: {} 15 | 16 | exporters: 17 | debug: 18 | verbosity: detailed 19 | 20 | service: 21 | pipelines: 22 | traces: 23 | receivers: [otlp] 24 | exporters: [debug] 25 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-go/00-install-instrumentation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: Instrumentation 3 | metadata: 4 | name: go 5 | spec: 6 | env: 7 | - name: OTEL_TRACES_EXPORTER 8 | value: otlp 9 | - name: OTEL_EXPORTER_OTLP_ENDPOINT 10 | value: http://localhost:4318 11 | - name: OTEL_EXPORTER_OTLP_TIMEOUT 12 | value: "20000" 13 | - name: OTEL_TRACES_SAMPLER 14 | value: always_on 15 | - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED 16 | value: "true" 17 | exporter: 18 | endpoint: http://localhost:4318 19 | propagators: 20 | - jaeger 21 | - b3 22 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-go/01-add-scc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: otel-instrumentation-go 5 | 
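The ServiceAccount above only matters once add-scc.sh (the next file) grants it the otel-go-instrumentation SecurityContextConstraints: Go auto-instrumentation runs a privileged, eBPF-based sidecar agent, which OpenShift blocks by default. A workload then opts in through the operator's documented injection annotations; a minimal, hypothetical pod-template sketch (the binary path is an example only and depends on the application image):

    # Hypothetical opt-in for Go auto-instrumentation; both annotation keys are
    # the operator's documented ones, the target-exe path is a placeholder.
    metadata:
      annotations:
        instrumentation.opentelemetry.io/inject-go: "true"
        instrumentation.opentelemetry.io/otel-go-auto-target-exe: /usr/local/bin/myapp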
-------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-go/add-scc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$(kubectl api-resources --api-group=operator.openshift.io -o name)" ]]; then 4 | kubectl apply -f scc.yaml 5 | oc adm policy add-scc-to-user otel-go-instrumentation -z otel-instrumentation-go -n "$NAMESPACE" 6 | fi 7 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-go/scc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.openshift.io/v1 2 | kind: SecurityContextConstraints 3 | metadata: 4 | name: otel-go-instrumentation 5 | allowHostDirVolumePlugin: true 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: true 8 | fsGroup: 9 | type: RunAsAny 10 | runAsUser: 11 | type: RunAsAny 12 | seLinuxContext: 13 | type: RunAsAny 14 | seccompProfiles: 15 | - '*' 16 | supplementalGroups: 17 | type: RunAsAny 18 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-java-other-ns/02-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-java-tls/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.key -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-java/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-nginx-contnr-secctx/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | mode: sidecar 25 | --------------------------------------------------------------------------------
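The sidecar-mode collectors in these instrumentation tests are never deployed on their own: the operator's admission webhook injects them into any pod in the namespace that opts in. A minimal sketch of that opt-in, assuming a `sidecar` collector like the ones above already exists in the same namespace (the pod name is a placeholder; the image is the e2e test app used elsewhere in these tests):

    # Hypothetical application pod requesting sidecar injection; the annotation
    # value may also name a specific collector instead of "true".
    apiVersion: v1
    kind: Pod
    metadata:
      name: myapp
      annotations:
        sidecar.opentelemetry.io/inject: "true"
    spec:
      containers:
        - name: myapp
          image: ghcr.io/open-telemetry/opentelemetry-operator/e2e-test-app-python:main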
/tests/e2e-instrumentation/instrumentation-nginx-multicontainer/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | mode: sidecar 25 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-nginx/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | mode: sidecar 25 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-nodejs-volume/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-nodejs/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-python-musl/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | metrics: 25 | receivers: [otlp] 26 | exporters: [debug] 27 | mode: sidecar 28 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-python/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | 13 | processors: {} 14 | 15 | exporters: 16 | debug: 17 | verbosity: detailed 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [otlp] 23 | exporters: [debug] 24 | mode: sidecar 25 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-sdk/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: | 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: 11 | http: 12 | processors: 13 | 14 | exporters: 15 | debug: 16 | 17 | service: 18 | pipelines: 19 | traces: 20 | receivers: [otlp] 21 | exporters: [debug] 22 | mode: sidecar 23 | -------------------------------------------------------------------------------- /tests/e2e-instrumentation/instrumentation-sdk/00-install-instrumentation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: Instrumentation 3 | metadata: 4 | name: sdk-only 5 | spec: 6 | env: 7 | - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED 8 | value: "true" 9 | exporter: 10 | endpoint: http://localhost:4317 11 | propagators: 12 | - jaeger 13 | - b3 14 | sampler: 15 | type: parentbased_traceidratio 16 | argument: "0.25" 17 | nodejs: 18 | env: 19 | - name: OTEL_NODEJS_DEBUG 20 | value: "true" 21 | python: 22 | env: 23 | - name: OTEL_ENV_VAR 24 | value: "true" 25 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/annotations/00-error.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: test-annotations-collector 5 | annotations: 6 | annotation.filter.out: "true" 7 | configmanagement.gke.io/token: "asdfasdf" 8 | spec: 9 | updateStrategy: 10 | rollingUpdate: 11 | maxSurge: 0 12 | maxUnavailable: 1 13 | type: RollingUpdate 14 | status: 15 | numberMisscheduled: 0 16 | (desiredNumberScheduled == numberReady): true 17 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/annotations/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: test-annotations 5 | annotations: 6 | annotation.filter.out: "true" 7 | configmanagement.gke.io/token: "asdfasdf" 8 | spec: 9 | mode: daemonset 10 | config: | 11 | receivers: 12 | jaeger: 13 | protocols: 14 | grpc: 15 | processors: 16 | 17 | exporters: 18 | debug: 19 | 20 | service: 21 | pipelines: 22 | traces: 23 | receivers: [jaeger] 24 | exporters: [debug] 25 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/annotations/01-error.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: test-annotations-collector 5 | annotations: 6 | annotation.filter.out: "true" 7 | spec: 8 | updateStrategy: 9 | rollingUpdate: 10 | maxSurge: 0 11 | maxUnavailable: 1 12 | type: RollingUpdate 13 | status: 14 | numberMisscheduled: 0 
15 | (desiredNumberScheduled == numberReady): true 16 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/annotations/01-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: test-annotations 5 | annotations: 6 | annotation.filter.out: "false" 7 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/annotations/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: smoke-pod-annotations 7 | spec: 8 | steps: 9 | - name: step-00 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - error: 14 | file: 00-error.yaml 15 | - name: step-01 16 | try: 17 | - patch: 18 | file: 01-patch.yaml 19 | - error: 20 | file: 01-error.yaml 21 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/labels/00-error.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: test-labels-collector 5 | labels: 6 | annotation.filter.out: "true" 7 | spec: 8 | updateStrategy: 9 | rollingUpdate: 10 | maxSurge: 0 11 | maxUnavailable: 1 12 | type: RollingUpdate 13 | status: 14 | numberMisscheduled: 0 15 | (desiredNumberScheduled == numberReady): true 16 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/labels/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: test-labels 5 | labels: 6 | annotation.filter.out: "true" 7 | spec: 8 | mode: daemonset 9 | config: | 10 | receivers: 11 | jaeger: 12 | protocols: 13 | grpc: 14 | processors: 15 | 16 | exporters: 17 | debug: 18 | 19 | service: 20 | pipelines: 21 | traces: 22 | receivers: [jaeger] 23 | exporters: [debug] 24 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/labels/01-error.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: test-labels-collector 5 | labels: 6 | annotation.filter.out: "true" 7 | spec: 8 | updateStrategy: 9 | rollingUpdate: 10 | maxSurge: 0 11 | maxUnavailable: 1 12 | type: RollingUpdate 13 | status: 14 | numberMisscheduled: 0 15 | (desiredNumberScheduled == numberReady): true 16 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/labels/01-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: test-labels 5 | labels: 6 | annotation.filter.out: "false" 7 | -------------------------------------------------------------------------------- /tests/e2e-metadata-filters/labels/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | #
yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: smoke-pod-labels 7 | spec: 8 | steps: 9 | - name: step-00 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - error: 14 | file: 00-error.yaml 15 | - name: step-01 16 | try: 17 | - patch: 18 | file: 01-patch.yaml 19 | - error: 20 | file: 01-error.yaml 21 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-multi-multicontainer-go/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | mode: sidecar 7 | config: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: 12 | http: 13 | processors: 14 | 15 | exporters: 16 | debug: 17 | 18 | service: 19 | pipelines: 20 | traces: 21 | receivers: [otlp] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-multi-multicontainer-go/01-add-scc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: otel-instrumentation-go 5 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-multi-multicontainer-go/add-scc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$(kubectl api-resources --api-group=operator.openshift.io -o name)" ]]; then 4 | kubectl apply -f scc.yaml 5 | oc adm policy add-scc-to-user otel-go-instrumentation -z otel-instrumentation-go -n "$NAMESPACE" 6 | fi 7 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-multi-multicontainer-go/scc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.openshift.io/v1 2 | kind: SecurityContextConstraints 3 | metadata: 4 | name: otel-go-instrumentation 5 | allowHostDirVolumePlugin: true 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: true 8 | fsGroup: 9 | type: RunAsAny 10 | runAsUser: 11 | type: RunAsAny 12 | seLinuxContext: 13 | type: RunAsAny 14 | seccompProfiles: 15 | - '*' 16 | supplementalGroups: 17 | type: RunAsAny 18 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-multi-multicontainer/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | mode: sidecar 7 | config: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: 12 | http: 13 | processors: 14 | 15 | exporters: 16 | debug: 17 | 18 | service: 19 | pipelines: 20 | traces: 21 | receivers: [otlp] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-multi-no-containers/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion:
opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | mode: sidecar 7 | config: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: 12 | http: 13 | processors: 14 | 15 | exporters: 16 | debug: 17 | 18 | service: 19 | pipelines: 20 | traces: 21 | receivers: [otlp] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-multi-instrumentation/instrumentation-single-instr-first-container/00-install-collector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | mode: sidecar 7 | config: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | grpc: 12 | http: 13 | processors: 14 | 15 | exporters: 16 | debug: 17 | 18 | service: 19 | pipelines: 20 | traces: 21 | receivers: [otlp] 22 | exporters: [debug] 23 | -------------------------------------------------------------------------------- /tests/e2e-native-sidecar/00-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | annotations: 6 | sidecar.opentelemetry.io/inject: "true" 7 | name: myapp 8 | spec: 9 | containers: 10 | - name: myapp 11 | initContainers: 12 | - name: otc-container 13 | restartPolicy: Always 14 | status: 15 | containerStatuses: 16 | - name: myapp 17 | ready: true 18 | started: true 19 | initContainerStatuses: 20 | - name: otc-container 21 | ready: true 22 | started: true 23 | -------------------------------------------------------------------------------- /tests/e2e-native-sidecar/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: native-sidecar 7 | spec: 8 | steps: 9 | - name: step-00 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - assert: 14 | file: 00-assert.yaml 15 | -------------------------------------------------------------------------------- /tests/e2e-opampbridge/opampbridge/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: e2e-test-app-bridge-server 5 | status: 6 | readyReplicas: 1 7 | replicas: 1 -------------------------------------------------------------------------------- /tests/e2e-opampbridge/opampbridge/02-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | labels: 6 | opentelemetry.io/opamp-reporting: "true" 7 | spec: 8 | config: | 9 | receivers: 10 | jaeger: 11 | protocols: 12 | grpc: 13 | otlp: 14 | protocols: 15 | grpc: 16 | http: 17 | processors: 18 | 19 | exporters: 20 | debug: 21 | 22 | service: 23 | pipelines: 24 | traces: 25 | receivers: [jaeger,otlp] 26 | exporters: [debug] -------------------------------------------------------------------------------- /tests/e2e-openshift/export-to-cluster-logging-lokistack/generate-logs-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: 
telemetrygen 5 | namespace: chainsaw-incllogs 6 | status: 7 | active: 1 8 | -------------------------------------------------------------------------------- /tests/e2e-openshift/export-to-cluster-logging-lokistack/install-loki.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: loki.grafana.com/v1 2 | kind: LokiStack 3 | metadata: 4 | name: logging-loki 5 | namespace: openshift-logging 6 | spec: 7 | size: 1x.demo 8 | storage: 9 | schemas: 10 | - version: v13 11 | effectiveDate: "2023-10-15" 12 | secret: 13 | name: logging-loki-s3 14 | type: s3 15 | storageClassName: ($STORAGE_CLASS_NAME) 16 | tenants: 17 | mode: openshift-logging 18 | -------------------------------------------------------------------------------- /tests/e2e-openshift/export-to-cluster-logging-lokistack/logging-uiplugin-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: logging 5 | namespace: ($COO_NAMESPACE) 6 | status: 7 | availableReplicas: 1 8 | readyReplicas: 1 9 | replicas: 1 10 | -------------------------------------------------------------------------------- /tests/e2e-openshift/export-to-cluster-logging-lokistack/logging-uiplugin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: observability.openshift.io/v1alpha1 2 | kind: UIPlugin 3 | metadata: 4 | name: logging 5 | namespace: ($COO_NAMESPACE) 6 | spec: 7 | logging: 8 | logsLimit: 20 9 | lokiStack: 10 | name: logging-loki 11 | timeout: 300s 12 | type: Logging 13 | -------------------------------------------------------------------------------- /tests/e2e-openshift/kafka/01-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | name: otlp-spans 5 | namespace: chainsaw-kafka 6 | spec: 7 | config: 8 | retention.ms: 300000 9 | segment.bytes: 1073741824 10 | partitions: 1 11 | replicas: 1 12 | status: 13 | topicName: otlp-spans 14 | -------------------------------------------------------------------------------- /tests/e2e-openshift/kafka/01-create-kafka-topics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta1 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-cluster 6 | name: otlp-spans 7 | namespace: chainsaw-kafka 8 | spec: 9 | config: 10 | retention.ms: 300000 11 | segment.bytes: 1073741824 12 | partitions: 1 13 | replicas: 1 14 | -------------------------------------------------------------------------------- /tests/e2e-openshift/kafka/02-otel-kakfa-receiver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: kafka-receiver 5 | namespace: chainsaw-kafka 6 | spec: 7 | mode: "deployment" 8 | config: | 9 | receivers: 10 | kafka/traces: 11 | brokers: ["my-cluster-kafka-brokers.chainsaw-kafka.svc:9092"] 12 | protocol_version: 3.5.0 13 | topic: otlp-spans 14 | exporters: 15 | debug: 16 | verbosity: detailed 17 | service: 18 | pipelines: 19 | traces: 20 | receivers: [kafka/traces] 21 | exporters: [debug] 22 | -------------------------------------------------------------------------------- /tests/e2e-openshift/kafka/04-assert.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | labels: 5 | app: telemetrygen-traces 6 | job-name: telemetrygen-traces 7 | name: telemetrygen-traces 8 | status: 9 | succeeded: 1 10 | -------------------------------------------------------------------------------- /tests/e2e-openshift/monitoring/00-workload-monitoring.yaml: -------------------------------------------------------------------------------- 1 | # oc -n openshift-user-workload-monitoring get pod 2 | # https://docs.openshift.com/container-platform/4.13/monitoring/enabling-monitoring-for-user-defined-projects.html#accessing-metrics-from-outside-cluster_enabling-monitoring-for-user-defined-projects 3 | 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: cluster-monitoring-config 8 | namespace: openshift-monitoring 9 | data: 10 | config.yaml: | 11 | enableUserWorkload: true 12 | alertmanagerMain: 13 | enableUserAlertmanagerConfig: true 14 | -------------------------------------------------------------------------------- /tests/e2e-openshift/monitoring/02-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: telemetrygen-traces 5 | status: 6 | active: 1 7 | 8 | --- 9 | apiVersion: batch/v1 10 | kind: Job 11 | metadata: 12 | name: telemetrygen-metrics 13 | status: 14 | active: 1 15 | 16 | --- 17 | apiVersion: batch/v1 18 | kind: Job 19 | metadata: 20 | name: telemetrygen-logs 21 | status: 22 | active: 1 23 | -------------------------------------------------------------------------------- /tests/e2e-openshift/monitoring/04-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/managed-by: opentelemetry-operator 6 | app.kubernetes.io/name: cluster-collector2-collector 7 | name: cluster-collector2-collector 8 | spec: 9 | endpoints: 10 | - port: prometheus 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/managed-by: opentelemetry-operator 14 | operator.opentelemetry.io/collector-service-type: base 15 | -------------------------------------------------------------------------------- /tests/e2e-openshift/monitoring/04-use-prometheus-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: cluster-collector2 5 | spec: 6 | mode: deployment 7 | observability: 8 | metrics: 9 | enableMetrics: true 10 | config: | 11 | receivers: 12 | otlp: 13 | protocols: 14 | grpc: 15 | http: 16 | processors: 17 | exporters: 18 | prometheus: 19 | endpoint: "0.0.0.0:8091" 20 | service: 21 | pipelines: 22 | metrics: 23 | receivers: [otlp] 24 | exporters: [prometheus] 25 | -------------------------------------------------------------------------------- /tests/e2e-openshift/multi-cluster/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: project.openshift.io/v1 2 | kind: Project 3 | metadata: 4 | name: chainsaw-multi-cluster-send 5 | status: 6 | phase: Active 7 | 8 | --- 9 | apiVersion: project.openshift.io/v1 10 | kind: Project 11 | metadata: 12 | name: chainsaw-multi-cluster-receive 13 | status: 14 | phase: Active 15 | --------------------------------------------------------------------------------
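The multi-cluster test models two clusters with two namespaces: chainsaw-multi-cluster-send runs the trace generators and a collector that forwards over OTLP to chainsaw-multi-cluster-receive, where a TempoMonolithic instance (01-create-tempo.yaml below) stores the traces. A hypothetical sketch of just the send-side exporter wiring; the real endpoint comes from the route exposed on the receive side, which this excerpt does not include:

    # Hypothetical send-side exporter config; the endpoint host is a placeholder
    # for the OpenShift route in front of the receive-side OTLP ingest.
    exporters:
      otlphttp:
        endpoint: https://tempo-multicluster-route.example.com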
/tests/e2e-openshift/multi-cluster/00-create-namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chainsaw-multi-cluster-send 5 | 6 | --- 7 | apiVersion: v1 8 | kind: Namespace 9 | metadata: 10 | name: chainsaw-multi-cluster-receive 11 | -------------------------------------------------------------------------------- /tests/e2e-openshift/multi-cluster/01-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: tempo-multicluster 5 | namespace: chainsaw-multi-cluster-receive 6 | status: 7 | availableReplicas: 1 8 | readyReplicas: 1 9 | replicas: 1 -------------------------------------------------------------------------------- /tests/e2e-openshift/multi-cluster/01-create-tempo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tempo.grafana.com/v1alpha1 2 | kind: TempoMonolithic 3 | metadata: 4 | name: multicluster 5 | namespace: chainsaw-multi-cluster-receive 6 | spec: {} 7 | -------------------------------------------------------------------------------- /tests/e2e-openshift/multi-cluster/04-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: generate-traces-http 5 | namespace: chainsaw-multi-cluster-send 6 | status: 7 | succeeded: 1 8 | 9 | --- 10 | apiVersion: batch/v1 11 | kind: Job 12 | metadata: 13 | name: generate-traces-grpc 14 | namespace: chainsaw-multi-cluster-send 15 | status: 16 | succeeded: 1 -------------------------------------------------------------------------------- /tests/e2e-openshift/multi-cluster/assert-verify-traces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: verify-traces-http 5 | status: 6 | succeeded: 1 7 | 8 | --- 9 | apiVersion: batch/v1 10 | kind: Job 11 | metadata: 12 | name: verify-traces-grpc 13 | status: 14 | succeeded: 1 -------------------------------------------------------------------------------- /tests/e2e-openshift/must-gather/install-collector-sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1beta1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: sidecar 5 | spec: 6 | config: 7 | receivers: 8 | otlp: 9 | protocols: 10 | grpc: {} 11 | http: {} 12 | processors: {} 13 | 14 | exporters: 15 | debug: 16 | 17 | service: 18 | pipelines: 19 | traces: 20 | receivers: [otlp] 21 | exporters: [debug] 22 | mode: sidecar 23 | -------------------------------------------------------------------------------- /tests/e2e-openshift/otlp-metrics-traces/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: tempo-otlpmetrics 5 | namespace: chainsaw-otlp-metrics 6 | status: 7 | availableReplicas: 1 8 | readyReplicas: 1 9 | replicas: 1 10 | -------------------------------------------------------------------------------- /tests/e2e-openshift/otlp-metrics-traces/00-install-tempo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tempo.grafana.com/v1alpha1 2 | kind: TempoMonolithic 3 | metadata: 4 | name: otlpmetrics 5 | namespace: chainsaw-otlp-metrics 6 | 
spec: {} -------------------------------------------------------------------------------- /tests/e2e-openshift/otlp-metrics-traces/01-workload-monitoring.yaml: -------------------------------------------------------------------------------- 1 | # oc -n openshift-user-workload-monitoring get pod 2 | # https://docs.openshift.com/container-platform/4.13/monitoring/enabling-monitoring-for-user-defined-projects.html#accessing-metrics-from-outside-cluster_enabling-monitoring-for-user-defined-projects 3 | 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: cluster-monitoring-config 8 | namespace: openshift-monitoring 9 | data: 10 | config.yaml: | 11 | enableUserWorkload: true 12 | -------------------------------------------------------------------------------- /tests/e2e-openshift/otlp-metrics-traces/assert-verify-traces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: verify-traces 5 | status: 6 | succeeded: 1 -------------------------------------------------------------------------------- /tests/e2e-openshift/route/00-install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: opentelemetry.io/v1alpha1 3 | kind: OpenTelemetryCollector 4 | metadata: 5 | name: simplest 6 | spec: 7 | mode: "deployment" 8 | ingress: 9 | type: route 10 | annotations: 11 | something.com: "true" 12 | route: 13 | termination: "insecure" 14 | 15 | config: | 16 | receivers: 17 | otlp: 18 | protocols: 19 | grpc: 20 | http: 21 | 22 | exporters: 23 | debug: 24 | 25 | service: 26 | pipelines: 27 | traces: 28 | receivers: [otlp] 29 | exporters: [debug] 30 | -------------------------------------------------------------------------------- /tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: chainsaw-scrape-in-cluster-monitoring-binding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-monitoring-view 9 | subjects: 10 | - kind: ServiceAccount 11 | name: otel-collector 12 | namespace: chainsaw-scrape-in-cluster-monitoring 13 | 14 | --- 15 | apiVersion: v1 16 | kind: ConfigMap 17 | metadata: 18 | name: cabundle 19 | namespace: chainsaw-scrape-in-cluster-monitoring -------------------------------------------------------------------------------- /tests/e2e-pdb/pdb/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: pdb-collector 5 | spec: 6 | selector: 7 | matchLabels: 8 | app.kubernetes.io/component: opentelemetry-collector 9 | maxUnavailable: 1 10 | -------------------------------------------------------------------------------- /tests/e2e-pdb/pdb/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: pdb 5 | spec: 6 | resources: 7 | limits: 8 | cpu: 500m 9 | memory: 128Mi 10 | requests: 11 | cpu: 5m 12 | memory: 64Mi 13 | 14 | config: | 15 | receivers: 16 | otlp: 17 | protocols: 18 | grpc: 19 | http: 20 | processors: 21 | 22 | exporters: 23 | debug: 24 | 25 | service: 26 | pipelines: 27 | traces: 28 | receivers: [otlp] 29 | exporters: [debug] 
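Note that 00-install.yaml sets no podDisruptionBudget, yet 00-assert.yaml above expects a PDB with maxUnavailable: 1: the test relies on the operator creating that default itself. Since policy/v1 forbids setting minAvailable and maxUnavailable together, the explicit override in 01-install.yaml below replaces the default rather than merging with it; the only spec change between the two steps is this fragment:

    # Explicit PDB override on the collector spec (step 01 of the pdb test);
    # it supersedes the defaulted maxUnavailable: 1.
    podDisruptionBudget:
      minAvailable: 1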
-------------------------------------------------------------------------------- /tests/e2e-pdb/pdb/01-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: pdb-collector 5 | spec: 6 | selector: 7 | matchLabels: 8 | app.kubernetes.io/component: opentelemetry-collector 9 | minAvailable: 1 10 | -------------------------------------------------------------------------------- /tests/e2e-pdb/pdb/01-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: pdb 5 | spec: 6 | podDisruptionBudget: 7 | minAvailable: 1 8 | resources: 9 | limits: 10 | cpu: 500m 11 | memory: 128Mi 12 | requests: 13 | cpu: 5m 14 | memory: 64Mi 15 | 16 | config: | 17 | receivers: 18 | otlp: 19 | protocols: 20 | grpc: 21 | http: 22 | processors: 23 | 24 | exporters: 25 | debug: 26 | 27 | service: 28 | pipelines: 29 | traces: 30 | receivers: [otlp] 31 | exporters: [debug] -------------------------------------------------------------------------------- /tests/e2e-pdb/pdb/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: pdb 7 | spec: 8 | steps: 9 | - name: step-00 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - assert: 14 | file: 00-assert.yaml 15 | - name: step-01 16 | try: 17 | - apply: 18 | file: 01-install.yaml 19 | - assert: 20 | file: 01-assert.yaml 21 | -------------------------------------------------------------------------------- /tests/e2e-pdb/target-allocator/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: pdb-targetallocator 5 | spec: 6 | selector: 7 | matchLabels: 8 | app.kubernetes.io/component: opentelemetry-targetallocator 9 | minAvailable: 1 10 | 11 | -------------------------------------------------------------------------------- /tests/e2e-pdb/target-allocator/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: target-allocator 7 | spec: 8 | steps: 9 | - name: step-00 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - assert: 14 | file: 00-assert.yaml 15 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-pm-prometheus-exporters/01-install-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-with-sidecar 5 | namespace: create-pm-prometheus 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: pod-with-sidecar 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | app: pod-with-sidecar 15 | annotations: 16 | sidecar.opentelemetry.io/inject: "true" 17 | spec: 18 | containers: 19 | - name: myapp 20 | image: 
ghcr.io/open-telemetry/opentelemetry-operator/e2e-test-app-python:main 21 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-pm-prometheus-exporters/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | creationTimestamp: null 6 | name: create-pm-prometheus-exporters 7 | spec: 8 | steps: 9 | - name: step-00 10 | try: 11 | - apply: 12 | file: 00-install.yaml 13 | - name: step-01 14 | try: 15 | - apply: 16 | file: 01-install-app.yaml 17 | - assert: 18 | file: 01-assert.yaml 19 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-sm-prometheus-exporters/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: create-sm-prometheus 5 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-sm-prometheus-exporters/02-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: create-sm-prometheus 6 | spec: 7 | observability: 8 | metrics: 9 | enableMetrics: true 10 | config: | 11 | receivers: 12 | otlp: 13 | protocols: 14 | grpc: 15 | http: 16 | 17 | exporters: 18 | prometheus/prod: 19 | endpoint: 0.0.0.0:8884 20 | 21 | service: 22 | pipelines: 23 | metrics: 24 | receivers: [otlp] 25 | exporters: [prometheus/prod] 26 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-sm-prometheus-exporters/03-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: simplest-collector 5 | namespace: create-sm-prometheus 6 | spec: 7 | ports: 8 | - appProtocol: grpc 9 | name: otlp-grpc 10 | port: 4317 11 | protocol: TCP 12 | targetPort: 4317 13 | - appProtocol: http 14 | name: otlp-http 15 | port: 4318 16 | protocol: TCP 17 | targetPort: 4318 18 | - name: prometheus-prod 19 | port: 9091 20 | protocol: TCP 21 | targetPort: 9091 22 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-sm-prometheus-exporters/03-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: opentelemetry.io/v1alpha1 2 | kind: OpenTelemetryCollector 3 | metadata: 4 | name: simplest 5 | namespace: create-sm-prometheus 6 | spec: 7 | observability: 8 | metrics: 9 | enableMetrics: true 10 | config: | 11 | receivers: 12 | otlp: 13 | protocols: 14 | grpc: 15 | http: 16 | 17 | exporters: 18 | prometheus/prod: 19 | endpoint: 0.0.0.0:9091 20 | 21 | service: 22 | pipelines: 23 | metrics: 24 | receivers: [otlp] 25 | exporters: [prometheus/prod] 26 | -------------------------------------------------------------------------------- /tests/e2e-prometheuscr/create-sm-prometheus-exporters/04-error.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: simplest-collector 5 | 
--------------------------------------------------------------------------------
/tests/e2e-prometheuscr/create-sm-prometheus-exporters/04-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
  namespace: create-sm-prometheus
spec:
  observability:
    metrics:
      enableMetrics: false
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:

    exporters:
      prometheus/prod:
        endpoint: 0.0.0.0:9091

    service:
      pipelines:
        metrics:
          receivers: [otlp]
          exporters: [prometheus/prod]
--------------------------------------------------------------------------------
/tests/e2e-prometheuscr/create-sm-prometheus-exporters/07-error.yaml:
--------------------------------------------------------------------------------
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: simplest-collector
  namespace: create-sm-prometheus
--------------------------------------------------------------------------------
/tests/e2e-ta-collector-mtls/certmanager-permissions/certmanager.yaml:
--------------------------------------------------------------------------------
- op: add
  path: /rules/-
  value:
    apiGroups:
    - cert-manager.io
    resources:
    - issuers
    - certificaterequests
    - certificates
    verbs:
    - create
    - get
    - list
    - watch
    - update
    - patch
    - delete
--------------------------------------------------------------------------------
/tests/e2e-ta-collector-mtls/ta-collector-mtls/01-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Secret
metadata:
  name: metrics-app-secret
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-app
  labels:
    app: metrics-app
status:
  observedGeneration: 1
  readyReplicas: 1
  replicas: 1
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-service
  labels:
    app: metrics-app
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: metrics-servicemonitor
  labels:
    app: metrics-app
--------------------------------------------------------------------------------
/tests/e2e-ta-collector-mtls/ta-collector-mtls/02-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: check-metrics
status:
  succeeded: 1
---
apiVersion: batch/v1
kind: Job
metadata:
  name: check-ta-jobs
status:
  succeeded: 1
---
apiVersion: batch/v1
kind: Job
metadata:
  name: check-ta-scrape-configs
status:
  succeeded: 1
--------------------------------------------------------------------------------
/tests/e2e-ta-collector-mtls/ta-disabled/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: ta-disabled
spec:
  steps:
  - name: step-00
    try:
    - apply:
        template: true
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
    catch:
    - podLogs:
        selector: app.kubernetes.io/managed-by=opentelemetry-operator
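`/tests/e2e-ta-collector-mtls/certmanager-permissions/certmanager.yaml` above is a JSON 6902 patch that appends cert-manager permissions to an RBAC rule list. One common way such a patch file is consumed is through a kustomize `patches` entry; a sketch, where the base file and target ClusterRole name are assumptions and not taken from this repository:

```yaml
# hypothetical kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- rbac.yaml                 # hypothetical base manifest containing the ClusterRole
patches:
- path: certmanager.yaml    # the JSON 6902 patch shown above
  target:
    kind: ClusterRole
    name: manager-role      # assumed target; the real name depends on the overlay
```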
--------------------------------------------------------------------------------
/tests/e2e-targetallocator-cr/targetallocator-label/02-change-collector-config.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: ta
  labels:
    opentelemetry.io/target-allocator: ta
spec:
  mode: statefulset
  config:
    receivers:
      prometheus:
        config:
          scrape_configs: []
    exporters:
      debug: {}
    service:
      pipelines:
        metrics:
          receivers: [prometheus]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e-targetallocator-cr/targetallocator-label/03-assert.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
data:
  targetallocator.yaml: |
    allocation_strategy: consistent-hashing
    collector_selector: null
    filter_strategy: ""
kind: ConfigMap
metadata:
  name: ta-targetallocator
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-features/01-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-targetallocator
status:
  containerStatuses:
  - name: ta-container
    restartCount: 0
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-features/02-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stateful-targetallocator
  labels:
    app.kubernetes.io/name: test
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: test
  template:
    metadata:
      labels:
        app.kubernetes.io/name: test
status:
  replicas: 1
  readyReplicas: 1
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-features/02-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: stateful
  labels:
    app.kubernetes.io/name: test
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-kubernetessd/01-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: check-metrics
status:
  succeeded: 1
---
apiVersion: batch/v1
kind: Job
metadata:
  name: check-ta-jobs
status:
  succeeded: 1
---
apiVersion: batch/v1
kind: Job
metadata:
  name: check-ta-scrape-configs
status:
  succeeded: 1
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-namespace/assert-job-failed.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: check-metrics
  namespace: ($namespace)
status:
  failed: 1
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-namespace/assert-job-succeeded.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: check-metrics
  namespace: ($namespace)
status:
  succeeded: 1
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-namespace/assert-workloads-ready.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: prometheus-cr-collector
  namespace: ($namespace)
status:
  readyReplicas: 1
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cr-targetallocator
  namespace: ($namespace)
status:
  readyReplicas: 1
  replicas: 1
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-namespace/resources/serviceaccounts.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  name: ta
  namespace: ($namespace)
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  name: collector
  namespace: ($namespace)
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-namespace/resources/ta-allow-namespaces.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: TargetAllocator
metadata:
  name: cr
  namespace: ($namespace)
spec:
  args:
    "zap-log-level": "debug"
  prometheusCR:
    enabled: true
    allowNamespaces:
    - ($namespace)
    scrapeInterval: 1s
    scrapeConfigSelector: {}
    probeSelector: {}
    serviceMonitorSelector: {}
    podMonitorSelector: {}
  observability:
    metrics:
      disablePrometheusAnnotations: true
      enableMetrics: true
  serviceAccount: ta
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-namespace/resources/ta-deny-namespaces.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: TargetAllocator
metadata:
  name: cr
  namespace: ($namespace)
spec:
  args:
    "zap-log-level": "debug"
  prometheusCR:
    enabled: true
    denyNamespaces:
    - ($denyNamespaces)
    scrapeInterval: 1s
    scrapeConfigSelector: {}
    probeSelector: {}
    serviceMonitorSelector: {}
    podMonitorSelector: {}
  observability:
    metrics:
      disablePrometheusAnnotations: true
      enableMetrics: true
  serviceAccount: ta
--------------------------------------------------------------------------------
/tests/e2e-targetallocator/targetallocator-prometheuscr/01-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: check-metrics
status:
  succeeded: 1
---
apiVersion: batch/v1
kind: Job
metadata:
  name: check-ta-jobs
status:
  succeeded: 1
---
apiVersion: batch/v1
kind: Job
metadata:
  name: check-ta-scrape-configs
status:
  succeeded: 1
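The targetallocator-namespace manifests above are chainsaw templates: `($namespace)` and `($denyNamespaces)` are JMESPath bindings that chainsaw resolves when a step sets `template: true`, so one manifest can be reused against the test's ephemeral namespace. A sketch of a step supplying such a binding (step and binding values illustrative; the suite's own chainsaw-test.yaml is not part of this dump):

```yaml
- name: apply-deny
  bindings:
  - name: denyNamespaces     # illustrative; the real test defines its own bindings
    value: kube-system
  try:
  - apply:
      template: true         # required so ($...) expressions are evaluated
      file: resources/ta-deny-namespaces.yaml
  - assert:
      template: true
      file: assert-workloads-ready.yaml
```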
--------------------------------------------------------------------------------
/tests/e2e-upgrade/upgrade-test/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: simplest-collector
  annotations:
    operatorVersion: "v0.86.0"
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/version: latest
status:
  readyReplicas: 1
--------------------------------------------------------------------------------
/tests/e2e-upgrade/upgrade-test/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
  annotations:
    operatorVersion: "v0.86.0"
spec:
  replicas: 1
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger,otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e-upgrade/upgrade-test/02-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: simplest-collector
  annotations:
    operatorVersion: "latest"
status:
  readyReplicas: 2
--------------------------------------------------------------------------------
/tests/e2e-upgrade/upgrade-test/02-upgrade-collector.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
  annotations:
    operatorVersion: "latest"
spec:
  replicas: 2
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger,otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/additional-containers-collector/00-assert-daemonset-without-additional-containers.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    additional-containers: without
spec:
  template:
    spec:
      (containers[?image == 'alpine' && name == 'alpine']):
        (length(@)): 0
--------------------------------------------------------------------------------
/tests/e2e/additional-containers-collector/00-assert-deployment-without-additional-containers.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    additional-containers: without
spec:
  template:
    spec:
      (containers[?image == 'alpine' && name == 'alpine']):
        (length(@)): 0
--------------------------------------------------------------------------------
/tests/e2e/additional-containers-collector/00-assert-statefulset-without-additional-containers.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    additional-containers: without
spec:
  template:
    spec:
      (containers[?image == 'alpine' && name == 'alpine']):
        (length(@)): 0
--------------------------------------------------------------------------------
/tests/e2e/additional-containers-collector/01-assert-daemonset-with-additional-containers.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    additional-containers: with
spec:
  template:
    spec:
      (containers[?image == 'alpine' && name == 'alpine']):
        (length(@)): 1
      (containers[?image == 'alpine' && name == 'alpine2']):
        (length(@)): 1
--------------------------------------------------------------------------------
/tests/e2e/additional-containers-collector/01-assert-deployment-with-additional-containers.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    additional-containers: with
spec:
  template:
    spec:
      (containers[?image == 'alpine' && name == 'alpine']):
        (length(@)): 1
      (containers[?image == 'alpine' && name == 'alpine2']):
        (length(@)): 1
--------------------------------------------------------------------------------
/tests/e2e/additional-containers-collector/01-assert-statefulset-with-additional-containers.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    additional-containers: with
spec:
  template:
    spec:
      (containers[?image == 'alpine' && name == 'alpine']):
        (length(@)): 1
      (containers[?image == 'alpine' && name == 'alpine2']):
        (length(@)): 1
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/00-assert-daemonset-without-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: without
spec:
  template:
    spec:
      (affinity == null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/00-assert-deployment-without-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: without
spec:
  template:
    spec:
      (affinity == null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/00-assert-statefulset-without-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: without
spec:
  template:
    spec:
      (affinity == null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/01-assert-daemonset-with-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: with
spec:
  template:
    spec:
      (affinity != null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/01-assert-deployment-with-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: with
spec:
  template:
    spec:
      (affinity != null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/01-assert-statefulset-with-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: with
spec:
  template:
    spec:
      (affinity != null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/02-assert-daemonset-with-modified-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: with
spec:
  template:
    spec:
      affinity:
        nodeAffinity:
          (requiredDuringSchedulingIgnoredDuringExecution == null): true
          (preferredDuringSchedulingIgnoredDuringExecution != null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/02-assert-deployment-with-modified-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: with
spec:
  template:
    spec:
      affinity:
        nodeAffinity:
          (requiredDuringSchedulingIgnoredDuringExecution == null): true
          (preferredDuringSchedulingIgnoredDuringExecution != null): true
--------------------------------------------------------------------------------
/tests/e2e/affinity-collector/02-assert-statefulset-with-modified-affinity.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    affinity: with
spec:
  template:
    spec:
      affinity:
        nodeAffinity:
          (requiredDuringSchedulingIgnoredDuringExecution == null): true
          (preferredDuringSchedulingIgnoredDuringExecution != null): true
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/00-assert-daemonset-with-extra-annotation.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-collector
  annotations:
    user-annotation: "existing"
spec:
  template:
    metadata:
      annotations:
        user-annotation: "existing"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/00-assert-deployment-with-extra-annotation.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-collector
  annotations:
    user-annotation: "existing"
spec:
  template:
    metadata:
      annotations:
        user-annotation: "existing"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/00-assert-statefulset-with-extra-annotation.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: statefulset-collector
  annotations:
    user-annotation: "existing"
spec:
  template:
    metadata:
      annotations:
        user-annotation: "existing"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/01-assert-daemonset-with-annotation-change.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-collector
  annotations:
    user-annotation: "modified"
    new-annotation: "yes"
spec:
  template:
    metadata:
      annotations:
        user-annotation: "modified"
        new-annotation: "yes"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/01-assert-deployment-with-annotation-change.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-collector
  annotations:
    user-annotation: "modified"
    new-annotation: "yes"
spec:
  template:
    metadata:
      annotations:
        user-annotation: "modified"
        new-annotation: "yes"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/01-assert-statefulset-with-annotation-change.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: statefulset-collector
  annotations:
    user-annotation: "modified"
    new-annotation: "yes"
spec:
  template:
    metadata:
      annotations:
        user-annotation: "modified"
        new-annotation: "yes"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/02-assert-daemonset-without-extra-annotation.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-collector
  (contains(keys(annotations), 'user-annotation')): true
  (contains(keys(annotations), 'new-annotation')): true
  annotations:
    manual-annotation: "true"
spec:
  template:
    metadata:
      (contains(keys(annotations), 'user-annotation')): true
      (contains(keys(annotations), 'new-annotation')): true
      annotations:
        manual-annotation: "true"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/02-assert-deployment-without-extra-annotation.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-collector
  (contains(keys(annotations), 'user-annotation')): true
  (contains(keys(annotations), 'new-annotation')): true
  annotations:
    manual-annotation: "true"
spec:
  template:
    metadata:
      (contains(keys(annotations), 'user-annotation')): true
      (contains(keys(annotations), 'new-annotation')): true
      annotations:
        manual-annotation: "true"
--------------------------------------------------------------------------------
/tests/e2e/annotation-change-collector/02-assert-statefulset-without-extra-annotation.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: statefulset-collector
  (contains(keys(annotations), 'user-annotation')): true
  (contains(keys(annotations), 'new-annotation')): true
  annotations:
    manual-annotation: "true"
spec:
  template:
    metadata:
      (contains(keys(annotations), 'user-annotation')): true
      (contains(keys(annotations), 'new-annotation')): true
      annotations:
        manual-annotation: "true"
"true" 9 | spec: 10 | template: 11 | metadata: 12 | (contains(keys(annotations), 'user-annotation')): true 13 | (contains(keys(annotations), 'new-annotation')): true 14 | annotations: 15 | manual-annotation: "true" 16 | -------------------------------------------------------------------------------- /tests/e2e/args-collector/00-assert-daemonset-without-args.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: opentelemetry-collector 6 | app.kubernetes.io/managed-by: opentelemetry-operator 7 | app.kubernetes.io/name: daemonset-collector 8 | app.kubernetes.io/part-of: opentelemetry 9 | args: without 10 | spec: 11 | template: 12 | spec: 13 | ~.(containers): 14 | name: otc-container 15 | (contains(args, '--extra-arg=yes')): false 16 | (contains(args, '--different-extra-arg=yes')): false 17 | -------------------------------------------------------------------------------- /tests/e2e/args-collector/00-assert-deployment-without-args.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: opentelemetry-collector 6 | app.kubernetes.io/managed-by: opentelemetry-operator 7 | app.kubernetes.io/name: deployment-collector 8 | app.kubernetes.io/part-of: opentelemetry 9 | args: without 10 | spec: 11 | template: 12 | spec: 13 | ~.(containers): 14 | name: otc-container 15 | (contains(args, '--extra-arg=yes')): false 16 | (contains(args, '--different-extra-arg=yes')): false 17 | -------------------------------------------------------------------------------- /tests/e2e/args-collector/00-assert-statefulset-without-args.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: opentelemetry-collector 6 | app.kubernetes.io/managed-by: opentelemetry-operator 7 | app.kubernetes.io/name: statefulset-collector 8 | app.kubernetes.io/part-of: opentelemetry 9 | args: without 10 | spec: 11 | template: 12 | spec: 13 | ~.(containers): 14 | name: otc-container 15 | (contains(args, '--extra-arg=yes')): false 16 | (contains(args, '--different-extra-arg=yes')): false 17 | -------------------------------------------------------------------------------- /tests/e2e/args-collector/01-assert-daemonset-with-args.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: opentelemetry-collector 6 | app.kubernetes.io/managed-by: opentelemetry-operator 7 | app.kubernetes.io/name: daemonset-collector 8 | app.kubernetes.io/part-of: opentelemetry 9 | args: with 10 | spec: 11 | template: 12 | spec: 13 | ~.(containers): 14 | name: otc-container 15 | (contains(args, '--extra-arg=yes')): true 16 | -------------------------------------------------------------------------------- /tests/e2e/args-collector/01-assert-deployment-with-args.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: opentelemetry-collector 6 | app.kubernetes.io/managed-by: opentelemetry-operator 7 | app.kubernetes.io/name: deployment-collector 8 | app.kubernetes.io/part-of: opentelemetry 9 | args: with 10 | 
--------------------------------------------------------------------------------
/tests/e2e/args-collector/01-assert-statefulset-with-args.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    args: with
spec:
  template:
    spec:
      ~.(containers):
        name: otc-container
        (contains(args, '--extra-arg=yes')): true
--------------------------------------------------------------------------------
/tests/e2e/args-collector/02-assert-daemonset-with-modified-args.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    args: with
spec:
  template:
    spec:
      ~.(containers):
        name: otc-container
        (contains(args, '--extra-arg=yes')): false
        (contains(args, '--different-extra-arg=yes')): true
--------------------------------------------------------------------------------
/tests/e2e/args-collector/02-assert-deployment-with-modified-args.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    args: with
spec:
  template:
    spec:
      ~.(containers):
        name: otc-container
        (contains(args, '--extra-arg=yes')): false
        (contains(args, '--different-extra-arg=yes')): true
--------------------------------------------------------------------------------
/tests/e2e/args-collector/02-assert-statefulset-with-modified-args.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    args: with
spec:
  template:
    spec:
      ~.(containers):
        name: otc-container
        (contains(args, '--extra-arg=yes')): false
        (contains(args, '--different-extra-arg=yes')): true
--------------------------------------------------------------------------------
/tests/e2e/daemonset-features/01-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: daemonset
spec:
  mode: daemonset
  terminationGracePeriodSeconds: 600
  hostNetwork: true
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
    processors:
    exporters:
      debug:
    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/daemonset-features/02-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-collector
spec:
  template:
    spec:
      terminationGracePeriodSeconds: 600
      hostNetwork: true
      containers:
      - args:
        - --config=/conf/collector.yaml
        name: otc-container
--------------------------------------------------------------------------------
/tests/e2e/daemonset-features/03-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
status:
  phase: Running
--------------------------------------------------------------------------------
/tests/e2e/daemonset-features/add-sa-collector.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Only applies on OpenShift clusters, detected via the operator.openshift.io API group.
if [[ "$(kubectl api-resources --api-group=operator.openshift.io -o name)" ]]; then
  echo "Adding service account to the OpenTelemetry Collector"
  kubectl patch otelcol daemonset --type=merge -p '{"spec":{"serviceAccount":"otel-collector-daemonset"}}' -n "$NAMESPACE"
fi
--------------------------------------------------------------------------------
/tests/e2e/daemonset-features/add-scc-openshift.sh:
--------------------------------------------------------------------------------
#!/bin/bash
if [[ "$(kubectl api-resources --api-group=operator.openshift.io -o name)" ]]; then
  echo "Running the test against an OpenShift Cluster"
  echo "Creating a Service Account"
  echo "Creating a Security Context Constraint"
  echo "Setting the Service Account for the Daemonset"
  echo "Adding the new policy to the Service Account"
  kubectl apply -f scc.yaml -n "$NAMESPACE"
  oc adm policy add-scc-to-user -z otel-collector-daemonset daemonset-with-hostport -n "$NAMESPACE"
fi
--------------------------------------------------------------------------------
/tests/e2e/env-vars/00-install-collector.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: sidecar
spec:
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
  mode: sidecar
--------------------------------------------------------------------------------
/tests/e2e/env-vars/00-install-instrumentation.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: Instrumentation
metadata:
  name: sdk-only
spec:
  exporter:
    endpoint: http://localhost:4317
--------------------------------------------------------------------------------
/tests/e2e/env-vars/01-deployment.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-deploy
spec:
  selector:
    matchLabels:
      app: my-deploy
  replicas: 1
  template:
    metadata:
      labels:
        app: my-deploy
      annotations:
        sidecar.opentelemetry.io/inject: "true"
        instrumentation.opentelemetry.io/inject-sdk: "true"
    spec:
      containers:
      - name: myapp
        image: ghcr.io/open-telemetry/opentelemetry-operator/e2e-test-app-python:main
--------------------------------------------------------------------------------
/tests/e2e/env-vars/03-job.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: my-job
spec:
  template:
    metadata:
      annotations:
        sidecar.opentelemetry.io/inject: "true"
        instrumentation.opentelemetry.io/inject-sdk: "true"
    spec:
      restartPolicy: Never
      containers:
      - name: myapp
        image: ghcr.io/open-telemetry/opentelemetry-operator/e2e-test-app-python:main
        imagePullPolicy: IfNotPresent
        command:
        - echo
        - ok
--------------------------------------------------------------------------------
/tests/e2e/extension/01-install-health-check-extension.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: health-check
spec:
  config:
    extensions:
      health_check: {}
    receivers:
      otlp:
        protocols:
          grpc:
          http:
    processors: {}

    exporters:
      debug: {}

    service:
      extensions:
      - health_check
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/ingress-subdomains/00-install.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
spec:
  mode: "deployment"
  ingress:
    type: ingress
    ruleType: subdomain
    hostname: "test.otel"
    annotations:
      something.com: "true"
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/ingress/00-install.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: otel-simplest
spec:
  mode: deployment
  ingress:
    type: ingress
    hostname: "example.com"
    annotations:
      something.com: "true"
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/ingress/01-remove-ingress.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: otel-simplest
spec:
  mode: deployment
  ingress: ~
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
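`01-remove-ingress.yaml` explicitly nulls the field with `ingress: ~`, after which the operator is expected to remove the Ingress it created for step 00. A companion error file for that step would plausibly assert the absence of the generated Ingress; the `-ingress` name suffix here is an assumption about the operator's naming convention, not something shown in this dump:

```yaml
# hypothetical 01-error.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: otel-simplest-ingress   # assumed derived name
```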
--------------------------------------------------------------------------------
/tests/e2e/label-change-collector/00-assert-daemonset-with-extra-label.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    user-label: "existing"
spec:
  template:
    metadata:
      labels:
        user-label: "existing"
--------------------------------------------------------------------------------
/tests/e2e/label-change-collector/00-assert-deployment-with-extra-label.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    user-label: "existing"
spec:
  template:
    metadata:
      labels:
        user-label: "existing"
--------------------------------------------------------------------------------
/tests/e2e/label-change-collector/00-assert-statefulset-with-extra-label.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    user-label: "existing"
spec:
  template:
    metadata:
      labels:
        user-label: "existing"
--------------------------------------------------------------------------------
/tests/e2e/label-change-collector/01-assert-daemonset-with-label-change.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    user-label: "modified"
    new-label: "yes"
spec:
  template:
    metadata:
      labels:
        user-label: "modified"
        new-label: "yes"
--------------------------------------------------------------------------------
/tests/e2e/label-change-collector/01-assert-deployment-with-label-change.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    user-label: "modified"
    new-label: "yes"
spec:
  template:
    metadata:
      labels:
        user-label: "modified"
        new-label: "yes"
--------------------------------------------------------------------------------
/tests/e2e/label-change-collector/01-assert-statefulset-with-label-change.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    user-label: "modified"
    new-label: "yes"
spec:
  template:
    metadata:
      labels:
        user-label: "modified"
        new-label: "yes"
--------------------------------------------------------------------------------
/tests/e2e/managed-reconcile/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
spec:
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/managed-reconcile/02-enable-reconciliation.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
spec:
  managementState: managed
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/multiple-configmaps/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: multiple-configmaps
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/node-selector-collector/00-assert-daemonset-without-node-selector.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    node-selector-mode: without
spec:
  (nodeSelector == null): true
--------------------------------------------------------------------------------
/tests/e2e/node-selector-collector/00-assert-deployment-without-node-selector.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    node-selector-mode: without
spec:
  (nodeSelector == null): true
--------------------------------------------------------------------------------
/tests/e2e/node-selector-collector/00-assert-statefulset-without-node-selector.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    node-selector-mode: without
spec:
  (nodeSelector == null): true
--------------------------------------------------------------------------------
/tests/e2e/node-selector-collector/01-assert-daemonset-with-node-selector.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: daemonset-collector
    app.kubernetes.io/part-of: opentelemetry
    node-selector-mode: with
spec:
  nodeSelector:
    kubernetes.io/os: linux
--------------------------------------------------------------------------------
/tests/e2e/node-selector-collector/01-assert-deployment-with-node-selector.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: deployment-collector
    app.kubernetes.io/part-of: opentelemetry
    node-selector-mode: with
spec:
  nodeSelector:
    kubernetes.io/os: linux
--------------------------------------------------------------------------------
/tests/e2e/node-selector-collector/01-assert-statefulset-with-node-selector.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/component: opentelemetry-collector
    app.kubernetes.io/managed-by: opentelemetry-operator
    app.kubernetes.io/name: statefulset-collector
    app.kubernetes.io/part-of: opentelemetry
    node-selector-mode: with
spec:
  nodeSelector:
    kubernetes.io/os: linux
--------------------------------------------------------------------------------
/tests/e2e/operator-restart/assert-operator-pod.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    app.kubernetes.io/name: opentelemetry-operator
    control-plane: controller-manager
  namespace: ($OTEL_NAMESPACE)
status:
  containerStatuses:
  - name: kube-rbac-proxy
    ready: true
    started: true
  - name: manager
    ready: true
    started: true
  phase: Running
--------------------------------------------------------------------------------
/tests/e2e/prometheus-config-validation/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: promreceiver-allocatorconfig-collector
status:
  readyReplicas: 1
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: promreceiver-allocatorconfig-targetallocator
status:
  readyReplicas: 1
  replicas: 1
--------------------------------------------------------------------------------
/tests/e2e/prometheus-config-validation/02-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: promreceiver-allocatorconfig-extra-collector
status:
  readyReplicas: 1
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: promreceiver-allocatorconfig-extra-targetallocator
status:
  readyReplicas: 1
  replicas: 1
--------------------------------------------------------------------------------
/tests/e2e/prometheus-config-validation/03-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: promreceiver-nopromconfig-collector
status:
  readyReplicas: 1
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: promreceiver-nopromconfig-targetallocator
status:
  readyReplicas: 1
  replicas: 1
--------------------------------------------------------------------------------
/tests/e2e/smoke-daemonset/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: daemonset-test
spec:
  mode: daemonset
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
    processors:
    exporters:
      debug:
    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-daemonset/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-daemonset
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-deletion/01-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: stateful
spec:
  autoscaler: null
  config:
    receivers:
      nop: {}
    exporters:
      nop: {}
    service:
      pipelines:
        metrics:
          receivers: [nop]
          exporters: [nop]
  ports: []
  targetAllocator:
    enabled: false
  observability:
    metrics:
      enableMetrics: false
--------------------------------------------------------------------------------
/tests/e2e/smoke-dns-config/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: poddnsconfig-collector
spec:
  template:
    spec:
      dnsConfig:
        nameservers:
        - 8.8.8.8
        searches:
        - my.dns.search.suffix
status:
  readyReplicas: 1
--------------------------------------------------------------------------------
/tests/e2e/smoke-dns-config/chainsaw-test.yaml:
--------------------------------------------------------------------------------
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-pod-dns-config
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-init-containers/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-init-containers
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-ip-families/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-ip-families-policies
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
  - name: step-01
    try:
    - apply:
        file: 01-install.yaml
    - assert:
        file: 01-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-pod-annotations/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pa-collector
  annotations:
    regular-annotation: regular-value
spec:
  template:
    metadata:
      annotations:
        pod-annotation1: value1
        pod-annotation2: value2
status:
  readyReplicas: 1
--------------------------------------------------------------------------------
/tests/e2e/smoke-pod-annotations/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: pa
  annotations:
    regular-annotation: regular-value
spec:
  podAnnotations:
    pod-annotation1: value1
    pod-annotation2: value2
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-pod-annotations/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-pod-annotations
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-pod-labels
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-ports/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: smoke-ports
spec:
  mode: daemonset
  ports:
  - appProtocol: grpc
    name: otlp-grpc
    port: 4317
    protocol: TCP
    targetPort: 4317
    hostPort: 4317
  config:
    receivers:
      jaeger:
        protocols:
          grpc: {}
    processors:
    exporters:
      debug: {}
    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-ports/01-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: smoke-ports
spec:
  mode: daemonset
  ports:
  - appProtocol: http
    name: custom-port
    port: 9090
    protocol: TCP
    targetPort: 9090
    hostPort: 9090
  config:
    receivers:
      nop: {}
    processors:
    exporters:
      prometheus:
        endpoint: "0.0.0.0:9090"
    service:
      pipelines:
        # the prometheus exporter only supports metrics, so it must sit in a
        # metrics pipeline; a traces pipeline would fail collector validation
        metrics:
          receivers: [nop]
          exporters: [prometheus]
--------------------------------------------------------------------------------
/tests/e2e/smoke-ports/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-ports
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
  - name: step-01
    try:
    - apply:
        file: 01-install.yaml
    - assert:
        file: 01-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-restarting-deployment/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: restarting-collector
status:
  readyReplicas: 1

---

apiVersion: v1
kind: Pod
spec:
  containers:
  - name: otc-container
status:
  phase: Running

---

apiVersion: v1
kind: Service
metadata:
  name: restarting-collector
spec:
  ports:
  - targetPort: 14250
--------------------------------------------------------------------------------
/tests/e2e/smoke-restarting-deployment/00-errors.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: restarting-collector
spec:
  ports:
  - targetPort: 4317
--------------------------------------------------------------------------------
/tests/e2e/smoke-restarting-deployment/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: restarting
spec:
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
    processors:
    exporters:
      debug:
    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-restarting-deployment/01-assert-second-config.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: restarting-collector
status:
  readyReplicas: 1

---

apiVersion: v1
kind: Pod
spec:
  containers:
  - name: otc-container
status:
  phase: Running

---

apiVersion: v1
kind: Service
metadata:
  name: restarting-collector
spec:
  ports:
  - targetPort: 14250
  - targetPort: 4317
--------------------------------------------------------------------------------
/tests/e2e/smoke-restarting-deployment/01-install-second-config.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: restarting
spec:
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
      otlp:
        protocols:
          grpc:
    processors:
    exporters:
      debug:
    service:
      pipelines:
        traces:
          receivers: [jaeger, otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-shareprocessnamespace/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-shareprocns-collector
spec:
  template:
    spec:
      shareProcessNamespace: true
--------------------------------------------------------------------------------
/tests/e2e/smoke-shareprocessnamespace/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: test-shareprocns
spec:
  shareProcessNamespace: true
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger,otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-shareprocessnamespace/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-shareprocessnamespace
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-sidecar-other-namespace/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-sidecar-other-namespace
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
  - name: step-01
    try:
    - apply:
        file: 01-install-app.yaml
    - assert:
        file: 01-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-sidecar/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: sidecar-for-my-app
spec:
  mode: sidecar
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-sidecar/01-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  annotations:
    sidecar.opentelemetry.io/inject: "true"
  labels:
    app: my-pod-with-sidecar
spec:
  containers:
  - name: myapp
  - name: otc-container
    env:
    - name: POD_NAME
    - name: OTEL_CONFIG
    - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME
    - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID
    - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME
    - name: OTEL_RESOURCE_ATTRIBUTES
status:
  phase: Running
--------------------------------------------------------------------------------
/tests/e2e/smoke-sidecar/01-install-app.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-deployment-with-sidecar
spec:
  selector:
    matchLabels:
      app: my-pod-with-sidecar
  replicas: 1
  template:
    metadata:
      labels:
        app: my-pod-with-sidecar
      annotations:
        sidecar.opentelemetry.io/inject: "true"
    spec:
      containers:
      - name: myapp
        image: ghcr.io/open-telemetry/opentelemetry-operator/e2e-test-app-python:main
--------------------------------------------------------------------------------
/tests/e2e/smoke-sidecar/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-sidecar
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
  - name: step-01
    try:
    - apply:
        file: 01-install-app.yaml
    - assert:
        file: 01-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-simplest-v1beta1/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: simplest
spec:
  config:
    receivers:
      jaeger:
        protocols:
          grpc:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger,otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-simplest-v1beta1/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-simplest-v1beta1
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-simplest/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: simplest
spec:
  config: |
    receivers:
      jaeger:
        protocols:
          grpc:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [jaeger,otlp]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-simplest/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-simplest
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-statefulset/00-assert.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: stateful-collector
status:
  replicas: 1
  readyReplicas: 1
---
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: stateful
status:
  (starts_with(image, 'ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector')): true
  (version != ''): true
  scale:
    replicas: 1
    statusReplicas: "1/1"
--------------------------------------------------------------------------------
/tests/e2e/smoke-statefulset/00-install.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: stateful
spec:
  mode: statefulset
  config:
    receivers:
      jaeger:
        protocols:
          grpc:
    exporters:
      debug:
    service:
      pipelines:
        traces:
          receivers: [jaeger]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/smoke-statefulset/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: smoke-statefulset
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/smoke-targetallocator/01-error.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: stateful-targetallocator
status:
  replicas: 1
  readyReplicas: 1
  observedGeneration: 1
--------------------------------------------------------------------------------
/tests/e2e/statefulset-features/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: statefulset-features
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
  - name: step-01
    try:
    - apply:
        file: 01-update-volume-claim-templates.yaml
    - assert:
        file: 01-assert.yaml
--------------------------------------------------------------------------------
/tests/e2e/use-labels-for-resource-attributes/00-install-collector.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: sidecar
spec:
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]
  mode: sidecar
--------------------------------------------------------------------------------
/tests/e2e/use-labels-for-resource-attributes/00-install-instrumentation.yaml:
--------------------------------------------------------------------------------
apiVersion: opentelemetry.io/v1alpha1
kind: Instrumentation
metadata:
  name: sdk-only
spec:
  exporter:
    endpoint: http://localhost:4317
  defaults:
    useLabelsForResourceAttributes: true
--------------------------------------------------------------------------------
/tests/e2e/versioned-configmaps/00-assert.yaml:
--------------------------------------------------------------------------------
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-collector
spec:
  template:
    spec:
      volumes:
      - name: otc-internal
        configMap:
          name: simple-collector-5723ff27
status:
  readyReplicas: 1
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: simple-collector-5723ff27
--------------------------------------------------------------------------------
/tests/e2e/versioned-configmaps/00-install.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: simple
spec:
  mode: "deployment"
  configVersions: 1
  config:
    receivers:
      otlp:
        protocols:
          grpc: {}
          http: {}
    processors:
      batch:
        send_batch_size: 10000
        timeout: 10s
    exporters:
      debug: {}

    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: [batch]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/versioned-configmaps/01-assert.yaml:
--------------------------------------------------------------------------------
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-collector
spec:
  template:
    spec:
      volumes:
      - name: otc-internal
        configMap:
          name: simple-collector-dfcfaead
status:
  readyReplicas: 1
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: simple-collector-dfcfaead
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: simple-collector-5723ff27
--------------------------------------------------------------------------------
/tests/e2e/versioned-configmaps/01-update.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: simple
spec:
  mode: "deployment"
  configVersions: 1
  config:
    receivers:
      otlp:
        protocols:
          grpc: {}
          http: {}
    processors:
      batch:
        send_batch_size: 10000
        timeout: 20s
    exporters:
      debug: {}

    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: [batch]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/versioned-configmaps/02-error.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: simplest-collector-ea71c537
--------------------------------------------------------------------------------
/tests/e2e/versioned-configmaps/02-update.yaml:
--------------------------------------------------------------------------------
---
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: simple
spec:
  mode: "deployment"
  configVersions: 1
  config:
    receivers:
      otlp:
        protocols:
          grpc: {}
          http: {}
    processors:
      batch:
        send_batch_size: 10000
        timeout: 30s
    exporters:
      debug: {}

    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: [batch]
          exporters: [debug]
--------------------------------------------------------------------------------
/tests/e2e/volume-claim-label/chainsaw-test.yaml:
--------------------------------------------------------------------------------
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: persistent-volume-claim-label
spec:
  steps:
  - name: step-00
    try:
    - apply:
        file: 00-install.yaml
    - assert:
        file: 00-assert.yaml
  - name: step-01
    try:
    - apply:
        file: 01-update-volume-claim-template-labels.yaml
    - assert:
        file: 01-assert.yaml
--------------------------------------------------------------------------------
/tests/test-e2e-apps/apache-httpd/Dockerfile:
--------------------------------------------------------------------------------
FROM httpd:2.4

RUN sed -i "s#Listen 80#Listen 8080#g" /usr/local/apache2/conf/httpd.conf
RUN chmod -R 777 /usr/local/apache2/
--------------------------------------------------------------------------------
/tests/test-e2e-apps/bridge-server/Dockerfile:
--------------------------------------------------------------------------------
FROM golang:1.23-alpine AS builder

WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -o bridge-server main.go

FROM scratch
COPY --from=builder /app/bridge-server .
ENTRYPOINT ["./bridge-server"]
--------------------------------------------------------------------------------
/tests/test-e2e-apps/bridge-server/opampsrv/logger.go:
--------------------------------------------------------------------------------
package opampsrv

import (
	"context"
	"log"

	"github.com/open-telemetry/opamp-go/client/types"
)

var _ types.Logger = &Logger{}

// Logger adapts the standard library *log.Logger to the opamp-go
// types.Logger interface; both levels simply delegate to Printf.
type Logger struct {
	logger *log.Logger
}

func (l *Logger) Debugf(ctx context.Context, format string, v ...interface{}) {
	l.logger.Printf(format, v...)
}

func (l *Logger) Errorf(ctx context.Context, format string, v ...interface{}) {
	l.logger.Printf(format, v...)
}
--------------------------------------------------------------------------------
/tests/test-e2e-apps/dotnet/DiceRoller/DiceRoller.csproj:
--------------------------------------------------------------------------------
<Project Sdk="Microsoft.NET.Sdk.Web">

  <PropertyGroup>
    <TargetFramework>net8.0</TargetFramework>
    <Nullable>enable</Nullable>
    <ImplicitUsings>enable</ImplicitUsings>
  </PropertyGroup>

</Project>
--------------------------------------------------------------------------------
/tests/test-e2e-apps/java/build.gradle:
--------------------------------------------------------------------------------
plugins {
    id 'java'
    id 'org.springframework.boot' version '3.1.3'
    id 'io.spring.dependency-management' version '1.1.3'
}

group = 'com.example'
version = '0.0.1-SNAPSHOT'

java {
    sourceCompatibility = '17'
}

repositories {
    mavenCentral()
}

dependencies {
    implementation 'org.springframework.boot:spring-boot-starter-web'
    testImplementation 'org.springframework.boot:spring-boot-starter-test'
}

tasks.named('test') {
    useJUnitPlatform()
}
--------------------------------------------------------------------------------
/tests/test-e2e-apps/metrics-basic-auth/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.11-slim

COPY requirements.txt .
RUN pip install -r requirements.txt

COPY app.py .

EXPOSE 9123

CMD ["python", "app.py"]
--------------------------------------------------------------------------------
/tests/test-e2e-apps/metrics-basic-auth/README.md:
--------------------------------------------------------------------------------
# Metrics Basic Auth E2E Test App

A simple web application used in an end-to-end (E2E) test to verify that the OpenTelemetry Collector can retrieve secret authentication details from the target allocator over mTLS.

## Overview

The web app provides a metrics endpoint secured with basic authentication, simulating real-world scenarios where services require secure access to their metrics.

## Usage

This app is used within the E2E test suite to verify the OpenTelemetry operator's handling of mTLS-secured communications.
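The `app.py` that the Dockerfile copies into the image is not included in this listing. For orientation only, a minimal sketch of such an app, assuming nothing beyond the Flask and prometheus_client packages pinned in `requirements.txt` and the port exposed by the Dockerfile, might look like the following; the route, metric name, and hard-coded credentials are illustrative placeholders, not the actual test app:

```python
# Hypothetical sketch of a basic-auth-protected metrics endpoint.
# Assumptions: Flask + prometheus_client (per requirements.txt) and
# port 9123 (per the Dockerfile's EXPOSE); everything else is made up.
from flask import Flask, Response, request
from prometheus_client import CONTENT_TYPE_LATEST, Counter, generate_latest

app = Flask(__name__)

# Placeholder credentials; the real test supplies these via Kubernetes secrets.
USERNAME, PASSWORD = "user", "password"

scrapes_total = Counter("app_scrapes_total", "Number of /metrics scrapes served")

@app.route("/metrics")
def metrics():
    auth = request.authorization
    if auth is None or auth.username != USERNAME or auth.password != PASSWORD:
        # Reject unauthenticated scrapes with a basic-auth challenge.
        return Response("Unauthorized", 401,
                        {"WWW-Authenticate": 'Basic realm="metrics"'})
    scrapes_total.inc()
    # Serve the default registry in the Prometheus text exposition format.
    return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=9123)
```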
--------------------------------------------------------------------------------
/tests/test-e2e-apps/metrics-basic-auth/requirements.txt:
--------------------------------------------------------------------------------
Flask==2.3.3
prometheus_client==0.20.0
--------------------------------------------------------------------------------
/tests/test-e2e-apps/nodejs/app.js:
--------------------------------------------------------------------------------
/*app.js*/
const express = require('express');

const PORT = parseInt(process.env.PORT || '3000');
const app = express();

function getRandomNumber(min, max) {
  return Math.floor(Math.random() * (max - min + 1) + min);
}

app.get('/rolldice', (req, res) => {
  res.send(getRandomNumber(1, 6).toString());
});

app.listen(PORT, () => {
  console.log(`Listening for requests on http://localhost:${PORT}`);
});
--------------------------------------------------------------------------------
/tests/test-e2e-apps/nodejs/package.json:
--------------------------------------------------------------------------------
{
  "name": "nodejs",
  "version": "1.0.0",
  "description": "",
  "main": "app.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "express": "^5.1.0"
  }
}
--------------------------------------------------------------------------------
/tests/test-e2e-apps/python/Dockerfile:
--------------------------------------------------------------------------------
FROM python:alpine3.18

WORKDIR /app

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY app.py .

# Set the FLASK_APP environment variable
ENV FLASK_APP=app.py

CMD ["flask", "run", "--host=0.0.0.0", "--port=8080"]
--------------------------------------------------------------------------------
/tests/test-e2e-apps/python/app.py:
--------------------------------------------------------------------------------
from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    return "Hi"

if __name__ == "__main__":
    app.run(host='0.0.0.0')
--------------------------------------------------------------------------------
/tests/test-e2e-apps/python/requirements.txt:
--------------------------------------------------------------------------------
Flask==2.3.3
--------------------------------------------------------------------------------