├── .config ├── 1espt │ └── PipelineAutobaseliningConfig.yml └── guardian │ └── .gdnbaselines ├── .github ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── build-and-push-dependent-helm-charts.yml │ ├── build-and-release-mixin.yml │ ├── scan-released-image.yml │ ├── scan.yml │ ├── size.yml │ └── stale.yml ├── .gitignore ├── .gitmodules ├── .pipelines ├── OneBranch.Official.yml ├── azure-pipeline-aksdeploy-test.yml ├── azure-pipeline-aksdeploy.yml ├── azure-pipeline-build.yml ├── azure-pipeline-config-tests.yml ├── azure-pipeline-regionstest.yml ├── azure-pipeline-release.yml ├── azure-template-aksdeploy.yml ├── azure-template-regionstest.yml ├── cgmanifest.json └── deployment │ ├── ServiceGroupRoot │ ├── Parameters │ │ ├── PrometheusCollector.ARCChart.Parameters.json │ │ ├── PrometheusCollector.ConfigReader.Parameters.json │ │ ├── PrometheusCollector.KSM.Parameters.json │ │ ├── PrometheusCollector.Linux.Parameters.json │ │ ├── PrometheusCollector.LinuxCCP.Parameters.json │ │ ├── PrometheusCollector.NE.Parameters.json │ │ ├── PrometheusCollector.TargetAllocator.Parameters.json │ │ └── PrometheusCollector.Windows.Parameters.json │ ├── RolloutSpecs │ │ └── RolloutSpecs.json │ ├── ScopeBindings │ │ └── Public.ScopeBindings.json │ ├── Scripts │ │ ├── pushAgentToAcr.sh │ │ └── pushChartToAcr.sh │ ├── ServiceModels │ │ └── Public.ServiceModel.json │ └── buildver.txt │ └── arc-extension-release │ └── ServiceGroupRoot │ ├── Parameters │ └── PrometheusCollector.ARCExtension.Parameters.json │ ├── RolloutSpecs │ └── RolloutSpecs.json │ ├── ScopeBindings │ └── Public.ScopeBindings.json │ ├── Scripts │ └── arcExtensionRelease.sh │ ├── ServiceModels │ └── Public.ServiceModel.json │ └── buildver.txt ├── .trivyignore ├── AddonArmTemplate ├── FullAzureMonitorMetricsProfile.json ├── FullAzureMonitorMetricsProfileParameters.json ├── README.md └── WindowsRecordingRuleGroupTemplate │ ├── WindowsRecordingRules.json │ └── WindowsRecordingRulesParameters.json ├── AddonBicepTemplate ├── FullAzureMonitorMetricsProfile.bicep ├── FullAzureMonitorMetricsProfileParameters.json ├── README.md ├── nested_azuremonitormetrics_dcra_clusterResourceId.bicep ├── nested_azuremonitormetrics_profile_clusterResourceId.bicep ├── nested_grafana_amw_role_assignment.bicep ├── recommendedMetricAlerts.bicep └── recommendedMetricAlertsProfileParameters.json ├── AddonPolicyTemplate ├── AddonPolicyMetricsProfile.parameters.json ├── AddonPolicyMetricsProfile.rules.json └── README.md ├── AddonTerraformTemplate ├── README.md ├── main.tf ├── outputs.tf ├── providers.tf └── variables.tf ├── ArcArmTemplate ├── FullAzureMonitorMetricsProfile.json └── FullAzureMonitorMetricsProfileParameters.json ├── ArcBicepTemplate ├── FullAzureMonitorMetricsProfile.bicep ├── FullAzureMonitorMetricsProfileParameters.json ├── README.md ├── nested_azuremonitormetrics_arc_k8s_extension_clusterResourceId.bicep └── nested_azuremonitormetrics_dcra_clusterResourceId.bicep ├── Azure-ARM-templates ├── Prometheus-RemoteWrite-DCR-artifacts │ ├── AMW.png │ ├── CustomDCE.json │ ├── CustomDCE.parameters.json │ ├── CustomDCR.json │ ├── CustomDCR.parameters.json │ ├── DCE_Overview.png │ ├── DCR_JSON.png │ ├── DCR_Overview.png │ └── README.md └── Workload-Rules │ ├── Alert-Rules-Parameters.json │ ├── Argo │ └── argocd-alerting-rules.json │ ├── ElasticSearch │ ├── elasticsearch-alerting-rules.json │ └── elasticsearch-recording-rules.json │ ├── Kafka │ └── kafka-alerting-rules.json │ └── Recording-Rules-Parameters.json ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── 
CONTRIBUTING.md ├── GeneratedMonitoringArtifacts ├── Default dashboards & metrics & rr lists.docx ├── Default │ ├── DefaultRecordingRules.json │ ├── default_dashboards_recrules_alerts.md │ └── recommendedMetricAlerts.json ├── DefaultAlertsList.txt ├── DefaultRecordingRulesList.txt ├── GenerateAlertARMTemplateResources.ps1 ├── GenerateRecordingRuleARMTemplateResources.ps1 └── non-default │ ├── README.md │ ├── ama-metrics-settings-configmap.yaml │ ├── api-server │ ├── apiserver-RecordingRules.json │ └── apiserver.json │ ├── coredns │ └── coredns.json │ ├── kubeproxy │ └── proxy.json │ ├── kubernetes │ ├── cluster-total.json │ ├── k8s-resources-multicluster.json │ ├── namespace-by-pod.json │ ├── namespace-by-workload.json │ ├── persistentvolumesusage.json │ ├── pod-total.json │ └── workload-total.json │ └── node-exporter │ ├── node-cluster-rsrc-use.json │ └── node-multicluster-rsrc-use.json ├── Hotfix-417488465-08272023.md ├── LICENSE ├── NOTICE ├── README.md ├── RELEASENOTES.md ├── REMOTE-WRITE-RELEASENOTES.md ├── SECURITY.md ├── SUPPORT.md ├── internal ├── alerts │ ├── README.md │ ├── example-alert-template.json │ └── test-prom-alerts.json ├── docs │ ├── 1PChartToAddonMigration.md │ ├── ARC.md │ ├── BUILDANDRELEASE.md │ ├── CONFIGPROCESSINGTESTS.md │ ├── DEPENDENTCHARTS.md │ ├── ESRPCodeSign.md │ ├── HPA.md │ ├── MARINER.md │ ├── MULTIARCH.md │ ├── Operator-CRD.md │ ├── Operator-Sharding.md │ ├── PERFTESTING.md │ ├── README.md │ ├── RegionTestsPipelineWiki.md │ ├── Telemetry.md │ ├── UpgradeDependencies.md │ ├── UpgradeotelCol.md │ └── customMetricEquivalents.md ├── grafana_uami │ ├── action.ps1 │ └── patch-add-umi.json ├── monitoring │ ├── README.md │ └── dashboards │ │ ├── cicd-db.json │ │ └── prod-near-ring-db.json ├── referenceapp │ ├── golang │ │ ├── go.mod │ │ ├── go.sum │ │ ├── linux │ │ │ └── Dockerfile │ │ ├── main.go │ │ └── windows │ │ │ └── Dockerfile │ ├── linux-http-scrape-config.yaml │ ├── linux-https-scrape-config.yaml │ ├── linux-scrape-config.yaml │ ├── prometheus-config │ ├── prometheus-mtls-ref-app.yaml │ ├── prometheus-reference-app.yaml │ ├── python │ │ ├── app.py │ │ ├── linux │ │ │ └── Dockerfile │ │ └── windows │ │ │ └── Dockerfile │ ├── referance-app-dashboard.json │ ├── testTemplates │ │ ├── azure-pvc-disk.yaml │ │ └── azure-pvc.yaml │ ├── win-prometheus-reference-app.yaml │ └── windows-scrape-config.yaml ├── remotewrite │ └── sidecar.yaml ├── scripts │ └── troubleshoot │ │ ├── README.md │ │ └── TroubleshootError.ps1 └── windowsExporterInstaller │ ├── README.md │ └── windows-exporter-daemonset.yaml ├── mixins ├── coredns │ ├── .gitignore │ ├── LICENSE │ ├── Makefile │ ├── README.md │ ├── alerts │ │ ├── add-runbook-links.libsonnet │ │ ├── alerts.libsonnet │ │ ├── coredns.libsonnet │ │ └── forward.libsonnet │ ├── config.libsonnet │ ├── dashboards │ │ ├── coredns.libsonnet │ │ └── dashboards.libsonnet │ ├── jsonnetfile.json │ ├── lib │ │ ├── alerts.jsonnet │ │ ├── dashboards.jsonnet │ │ └── utils.libsonnet │ ├── mixin.libsonnet │ └── tests.yaml ├── kubernetes │ ├── .gitignore │ ├── .lint │ ├── .vale.ini │ ├── CODE_OF_CONDUCT.md │ ├── CONTRIBUTING.md │ ├── DESIGN.md │ ├── LICENSE │ ├── Makefile │ ├── OWNERS │ ├── README.md │ ├── SECURITY.md │ ├── SECURITY_CONTACTS │ ├── alerts │ │ ├── alerts.libsonnet │ │ ├── apps_alerts.libsonnet │ │ ├── kube_apiserver.libsonnet │ │ ├── kube_controller_manager.libsonnet │ │ ├── kube_proxy.libsonnet │ │ ├── kube_scheduler.libsonnet │ │ ├── kubelet.libsonnet │ │ ├── resource_alerts.libsonnet │ │ ├── storage_alerts.libsonnet │ │ └── 
system_alerts.libsonnet │ ├── config.libsonnet │ ├── dashboards │ │ ├── apiserver.libsonnet │ │ ├── controller-manager.libsonnet │ │ ├── dashboards.libsonnet │ │ ├── defaults.libsonnet │ │ ├── k8s-resources-cluster.json │ │ ├── k8s-resources-namespace.json │ │ ├── k8s-resources-node.json │ │ ├── k8s-resources-pod.json │ │ ├── k8s-resources-windows-cluster.json │ │ ├── k8s-resources-windows-namespace.json │ │ ├── k8s-resources-windows-pod.json │ │ ├── k8s-resources-workload.json │ │ ├── k8s-resources-workloads-namespace.json │ │ ├── k8s-windows-cluster-rsrc-use.json │ │ ├── k8s-windows-node-rsrc-use.json │ │ ├── kubelet.json │ │ ├── kubelet.libsonnet │ │ ├── network-usage │ │ │ ├── cluster-total.libsonnet │ │ │ ├── namespace-by-pod.libsonnet │ │ │ ├── namespace-by-workload.libsonnet │ │ │ ├── pod-total.libsonnet │ │ │ └── workload-total.libsonnet │ │ ├── network.libsonnet │ │ ├── persistentvolumesusage.libsonnet │ │ ├── proxy.libsonnet │ │ ├── resources.libsonnet │ │ ├── resources │ │ │ ├── cluster.libsonnet │ │ │ ├── multi-cluster.libsonnet │ │ │ ├── namespace.libsonnet │ │ │ ├── node.libsonnet │ │ │ ├── pod.libsonnet │ │ │ ├── workload-namespace.libsonnet │ │ │ └── workload.libsonnet │ │ ├── scheduler.libsonnet │ │ └── windows.libsonnet │ ├── jsonnetfile.json │ ├── lib │ │ ├── absent_alert.libsonnet │ │ ├── add-runbook-links.libsonnet │ │ ├── alerts.jsonnet │ │ ├── dashboards.jsonnet │ │ ├── promgrafonnet │ │ │ ├── gauge.libsonnet │ │ │ ├── numbersinglestat.libsonnet │ │ │ └── promgrafonnet.libsonnet │ │ ├── rules.jsonnet │ │ └── utils.libsonnet │ ├── mixin.libsonnet │ ├── rules │ │ ├── apps.libsonnet │ │ ├── kube_apiserver-availability.libsonnet │ │ ├── kube_apiserver-burnrate.libsonnet │ │ ├── kube_apiserver-config.libsonnet │ │ ├── kube_apiserver-histogram.libsonnet │ │ ├── kube_apiserver.libsonnet │ │ ├── kube_scheduler.libsonnet │ │ ├── kubelet.libsonnet │ │ ├── node.libsonnet │ │ ├── rules.libsonnet │ │ └── windows.libsonnet │ ├── runbook.md │ ├── scripts │ │ ├── check-selectors-ksm.sh │ │ ├── go.mod │ │ ├── go.sum │ │ └── tools.go │ └── tests.yaml └── node │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── alerts.jsonnet │ ├── alerts │ └── alerts.libsonnet │ ├── config.libsonnet │ ├── dashboards.jsonnet │ ├── dashboards │ ├── dashboards.libsonnet │ ├── node-rsrc-use.json │ ├── node.libsonnet │ ├── nodes.json │ └── use.libsonnet │ ├── jsonnetfile.json │ ├── lib │ └── prom-mixin.libsonnet │ ├── mixin.libsonnet │ ├── rules.jsonnet │ └── rules │ └── rules.libsonnet ├── otelcollector ├── LICENSE ├── NOTICE ├── VERSION ├── build │ ├── linux │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── ccp │ │ │ └── Dockerfile │ │ ├── configuration-reader │ │ │ └── Dockerfile │ │ └── rpm-repos │ │ │ └── mariner-official-extras.repo │ └── windows │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ └── scripts │ │ └── setup.ps1 ├── configmapparser │ └── default-prom-configs │ │ ├── acstorCapacityProvisionerDefaultFile.yml │ │ ├── acstorMetricsExporterDefaultFile.yml │ │ ├── apiserverDefault.yml │ │ ├── cadvisorDefaultDs.yml │ │ ├── cadvisorDefaultRsAdvanced.yml │ │ ├── cadvisorDefaultRsSimple.yml │ │ ├── controlplane_apiserver.yml │ │ ├── controlplane_cluster_autoscaler.yml │ │ ├── controlplane_etcd.yml │ │ ├── controlplane_kube_controller_manager.yml │ │ ├── controlplane_kube_scheduler.yml │ │ ├── corednsDefault.yml │ │ ├── kappieBasicDefaultDs.yml │ │ ├── kubeletDefaultDs.yml │ │ ├── kubeletDefaultRsAdvanced.yml │ │ ├── kubeletDefaultRsAdvancedWindowsDaemonset.yml │ │ ├── kubeletDefaultRsSimple.yml │ │ 
├── kubeproxyDefault.yml │ │ ├── kubestateDefault.yml │ │ ├── networkobservabilityCiliumDefaultDs.yml │ │ ├── networkobservabilityHubbleDefaultDs.yml │ │ ├── networkobservabilityRetinaDefaultDs.yml │ │ ├── nodeexporterDefaultDs.yml │ │ ├── nodeexporterDefaultRsAdvanced.yml │ │ ├── nodeexporterDefaultRsSimple.yml │ │ ├── podannotationsDefault.yml │ │ ├── prometheusCollectorHealth.yml │ │ ├── windowsexporterDefaultDs.yml │ │ ├── windowsexporterDefaultRsSimple.yml │ │ ├── windowskubeproxyDefaultDs.yml │ │ └── windowskubeproxyDefaultRsSimple.yml ├── configmaps │ ├── ama-metrics-prometheus-config-configmap.yaml │ ├── ama-metrics-prometheus-config-node-configmap.yaml │ ├── ama-metrics-prometheus-config-node-windows-configmap.yaml │ ├── ama-metrics-settings-configmap-v1.yaml │ └── ama-metrics-settings-configmap.yaml ├── configuration-reader-builder │ ├── Makefile │ ├── go.mod │ ├── go.sum │ └── main.go ├── customresources │ ├── pod-monitor-template.yaml │ └── service-monitor-template.yaml ├── deploy │ ├── addon-chart │ │ ├── Readme.md │ │ ├── azure-monitor-metrics-addon │ │ │ ├── Chart-template.yaml │ │ │ ├── local_testing_aks.ps1 │ │ │ ├── templates │ │ │ │ ├── _ama-metrics-helpers.tpl │ │ │ │ ├── _arc-extension-helpers.tpl │ │ │ │ ├── ama-metrics-clusterRole.yaml │ │ │ │ ├── ama-metrics-clusterRoleBinding.yaml │ │ │ │ ├── ama-metrics-collector-hpa.yaml │ │ │ │ ├── ama-metrics-daemonset.yaml │ │ │ │ ├── ama-metrics-deployment.yaml │ │ │ │ ├── ama-metrics-extensionIdentity.yaml │ │ │ │ ├── ama-metrics-ksm-clusterrolebinding.yaml │ │ │ │ ├── ama-metrics-ksm-deployment.yaml │ │ │ │ ├── ama-metrics-ksm-role.yaml │ │ │ │ ├── ama-metrics-ksm-service.yaml │ │ │ │ ├── ama-metrics-ksm-serviceaccount.yaml │ │ │ │ ├── ama-metrics-podmonitor-crd.yaml │ │ │ │ ├── ama-metrics-secret.yaml │ │ │ │ ├── ama-metrics-serviceAccount.yaml │ │ │ │ ├── ama-metrics-servicemonitor-crd.yaml │ │ │ │ ├── ama-metrics-targetallocator-service.yaml │ │ │ │ └── ama-metrics-targetallocator.yaml │ │ │ └── values-template.yaml │ │ └── ccp-metrics-plugin │ │ │ ├── Chart-template.yaml │ │ │ ├── templates │ │ │ ├── ama-metrics-deployment.yaml │ │ │ ├── ama-metrics-role.yaml │ │ │ ├── ama-metrics-roleBinding.yaml │ │ │ └── ama-metrics-serviceAccount.yaml │ │ │ └── values-template.yaml │ ├── chart │ │ └── prometheus-collector │ │ │ ├── .helmignore │ │ │ ├── Chart-template.yaml │ │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── prometheus-collector-azure-keyVault-secret.yaml │ │ │ ├── prometheus-collector-clusterRole.yaml │ │ │ ├── prometheus-collector-clusterRoleBinding.yaml │ │ │ ├── prometheus-collector-configmap.yaml │ │ │ ├── prometheus-collector-daemonset.yaml │ │ │ ├── prometheus-collector-deployment.yaml │ │ │ ├── prometheus-collector-secretProviderClass.yaml │ │ │ ├── prometheus-collector-serviceAccount.yaml │ │ │ └── prometheus-collector-settings-configmap.yaml │ │ │ └── values-template.yaml │ ├── dashboard │ │ ├── api-server │ │ │ └── api-server.json │ │ ├── custom-app │ │ │ └── reference-app.json │ │ ├── dns │ │ │ └── coredns.json │ │ ├── k8s │ │ │ ├── Kubernetes_ComputeResources_Cluster.json │ │ │ ├── Kubernetes_ComputeResources_Namespace(Pods).json │ │ │ ├── Kubernetes_ComputeResources_Namespace(Workloads).json │ │ │ ├── Kubernetes_ComputeResources_Node(Pods).json │ │ │ ├── Kubernetes_ComputeResources_Pod.json │ │ │ ├── Kubernetes_ComputeResources_Workload.json │ │ │ ├── Kubernetes_Kubelet.json │ │ │ ├── Kubernetes_Networking_Cluster.json │ │ │ ├── Kubernetes_Networking_Namespace(Pods).json │ │ │ ├── 
Kubernetes_Networking_Namespace(Workload).json │ │ │ ├── Kubernetes_Networking_Pod.json │ │ │ ├── Kubernetes_Networking_Workload.json │ │ │ ├── Kubernetes_PersistentVolumes.json │ │ │ └── Kubernetes_StatefulSets.json │ │ ├── kube-proxy │ │ │ └── Kubernetes_Proxy.json │ │ ├── node-exporter │ │ │ ├── Nodes(Nodeexporter).json │ │ │ ├── USEMethod_Cluster(Nodeexporter).json │ │ │ └── USEMethod_Node(Nodeexporter).json │ │ ├── prometheus-collector │ │ │ └── prometheus-collector-health.json │ │ └── windows │ │ │ ├── Kubernetes _ USE Method _ Cluster(Windows).json │ │ │ ├── Kubernetes _ USE Method _ Node(Windows).json │ │ │ └── recording-rules │ │ │ ├── Kubernetes _ Compute Resources _ Cluster(Windows).json │ │ │ ├── Kubernetes _ Compute Resources _ Namespace(Windows).json │ │ │ ├── Kubernetes _ Compute Resources _ Pod(Windows).json │ │ │ ├── Kubernetes _ USE Method _ Cluster(Windows).json │ │ │ └── Kubernetes _ USE Method _ Node(Windows).json │ ├── dependentcharts │ │ ├── kube-state-metrics │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── ciliumnetworkpolicy.yaml │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ ├── crs-configmap.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── extra-manifests.yaml │ │ │ │ ├── kubeconfig-secret.yaml │ │ │ │ ├── networkpolicy.yaml │ │ │ │ ├── pdb.yaml │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ ├── rbac-configmap.yaml │ │ │ │ ├── role.yaml │ │ │ │ ├── rolebinding.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── servicemonitor.yaml │ │ │ │ ├── stsdiscovery-role.yaml │ │ │ │ ├── stsdiscovery-rolebinding.yaml │ │ │ │ └── verticalpodautoscaler.yaml │ │ │ └── values.yaml │ │ └── prometheus-node-exporter │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── daemonset.yaml │ │ │ ├── endpoints.yaml │ │ │ ├── extra-manifests.yaml │ │ │ ├── networkpolicy.yaml │ │ │ ├── podmonitor.yaml │ │ │ ├── psp-clusterrole.yaml │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ ├── psp.yaml │ │ │ ├── rbac-configmap.yaml │ │ │ ├── service.yaml │ │ │ ├── serviceaccount.yaml │ │ │ ├── servicemonitor.yaml │ │ │ └── verticalpodautoscaler.yaml │ │ │ └── values.yaml │ ├── example-custom-resources │ │ ├── pod-monitor │ │ │ ├── pod-monitor-reference-app-mtls.yaml │ │ │ └── pod-monitor-reference-app.yaml │ │ └── service-monitor │ │ │ └── service-monitor-reference-app.yaml │ ├── example-default-scrape-configs │ │ ├── default-prometheus-config-ds-linux.yaml │ │ ├── default-prometheus-config-ds-windows.yaml │ │ ├── default-prometheus-config-rs-advanced.yaml │ │ └── default-prometheus-config-rs-simple.yaml │ └── retina │ │ └── custom-files │ │ └── network-observability-service.yaml ├── fluent-bit │ ├── fluent-bit-daemonset.yaml │ ├── fluent-bit-parsers.conf │ ├── fluent-bit-windows.conf │ ├── fluent-bit.yaml │ ├── plugins_options.cmake │ └── src │ │ ├── Makefile │ │ ├── appinsights.go │ │ ├── cmetrics_decoder.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── makefile_windows.ps1 │ │ ├── out_appinsights.go │ │ ├── process_stats.go │ │ ├── prometheus_collector_health.go │ │ ├── telemetry.go │ │ └── utils.go ├── go.mod ├── go.sum ├── logrotate │ ├── crontab │ ├── logrotate │ └── prometheus-collector ├── main │ ├── README.md │ └── main.go ├── mdsd │ └── envmdsd ├── metricextension │ ├── me.config │ ├── 
me_ds.config │ ├── me_ds_internal.config │ ├── me_ds_internal_win.config │ ├── me_ds_win.config │ └── me_internal.config ├── opentelemetry-collector-builder │ ├── .gitignore │ ├── Makefile │ ├── PROMETHEUS_VERSION │ ├── ccp-collector-config-default.yml │ ├── ccp-collector-config-replicaset.yml │ ├── ccp-collector-config-template.yml │ ├── collector-config-default.yml │ ├── collector-config-replicaset.yml │ ├── collector-config-template.yml │ ├── components.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── makefile_windows.ps1 ├── otel-allocator │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── benchmark_test.go │ ├── go.mod │ ├── go.sum │ ├── internal │ │ ├── allocation │ │ │ ├── allocator.go │ │ │ ├── allocator_test.go │ │ │ ├── consistent_hashing.go │ │ │ ├── consistent_hashing_test.go │ │ │ ├── least_weighted.go │ │ │ ├── least_weighted_test.go │ │ │ ├── per_node.go │ │ │ ├── per_node_test.go │ │ │ ├── strategy.go │ │ │ ├── strategy_test.go │ │ │ └── testutils.go │ │ ├── collector │ │ │ ├── collector.go │ │ │ └── collector_test.go │ │ ├── config │ │ │ ├── config.go │ │ │ ├── config_test.go │ │ │ ├── flags.go │ │ │ ├── flags_test.go │ │ │ └── testdata │ │ │ │ ├── config_test.yaml │ │ │ │ ├── file_sd_test.json │ │ │ │ ├── no_config.yaml │ │ │ │ ├── pod_service_selector_camelcase_expressions_test.yaml │ │ │ │ ├── pod_service_selector_camelcase_test.yaml │ │ │ │ ├── pod_service_selector_expressions_test.yaml │ │ │ │ └── pod_service_selector_test.yaml │ │ ├── diff │ │ │ ├── diff.go │ │ │ └── diff_test.go │ │ ├── prehook │ │ │ ├── prehook.go │ │ │ ├── relabel.go │ │ │ └── relabel_test.go │ │ ├── server │ │ │ ├── bench_test.go │ │ │ ├── mocks_test.go │ │ │ ├── server.go │ │ │ ├── server_test.go │ │ │ └── testdata │ │ │ │ ├── prom-config-all-actions.yaml │ │ │ │ ├── prom-config-test.yaml │ │ │ │ └── prom-no-config.yaml │ │ ├── target │ │ │ ├── discovery.go │ │ │ ├── discovery_test.go │ │ │ ├── target.go │ │ │ └── testdata │ │ │ │ ├── test.yaml │ │ │ │ └── test_update.yaml │ │ └── watcher │ │ │ ├── promOperator.go │ │ │ ├── promOperator_test.go │ │ │ └── watcher.go │ └── main.go ├── prom-config-validator-builder │ ├── Makefile │ ├── components.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ ├── makefile_windows.ps1 │ └── prometheus-config.yaml ├── prometheus-ui │ ├── Makefile │ ├── assets_embed.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── makefile_windows.ps1 ├── prometheusreceiver │ ├── DESIGN.md │ ├── Makefile │ ├── README.md │ ├── apiserver │ │ ├── config.go │ │ └── manager.go │ ├── config.go │ ├── config_test.go │ ├── doc.go │ ├── factory.go │ ├── factory_test.go │ ├── generated_component_test.go │ ├── generated_package_test.go │ ├── go.mod │ ├── go.sum │ ├── internal │ │ ├── appendable.go │ │ ├── logger.go │ │ ├── logger_test.go │ │ ├── metadata.go │ │ ├── metadata │ │ │ └── generated_status.go │ │ ├── metricfamily.go │ │ ├── metricfamily_test.go │ │ ├── metrics_adjuster.go │ │ ├── metrics_adjuster_test.go │ │ ├── metricsutil_test.go │ │ ├── prom_to_otlp.go │ │ ├── prom_to_otlp_test.go │ │ ├── staleness_end_to_end_test.go │ │ ├── starttimemetricadjuster.go │ │ ├── starttimemetricadjuster_test.go │ │ ├── transaction.go │ │ ├── transaction_test.go │ │ ├── util.go │ │ └── util_test.go │ ├── metadata.yaml │ ├── metrics_receiver.go │ ├── metrics_receiver_helper_test.go │ ├── metrics_receiver_honor_timestamp_test.go │ ├── metrics_receiver_labels_test.go │ ├── metrics_receiver_metric_name_normalize_test.go │ ├── metrics_receiver_non_numerical_test.go │ ├── metrics_receiver_open_metrics_test.go 
│ ├── metrics_receiver_protobuf_test.go │ ├── metrics_receiver_report_extra_scrape_metrics_test.go │ ├── metrics_receiver_scrape_config_files_test.go │ ├── metrics_receiver_test.go │ ├── metrics_reciever_metric_rename_test.go │ ├── metricsreceiver_api_server_test.go │ ├── scrapeloop-flowchart.png │ └── targetallocator │ │ ├── config.go │ │ ├── config_test.go │ │ ├── manager.go │ │ ├── manager_test.go │ │ └── testdata │ │ ├── config.yaml │ │ ├── dummy-tls-cert-file │ │ └── dummy-tls-key-file ├── release.sh ├── scripts │ ├── ccpsetup.sh │ ├── setup-configreader.sh │ └── setup.sh ├── shared │ ├── arc_eula.go │ ├── configmap │ │ ├── ccp │ │ │ ├── configmapparserforccp.go │ │ │ ├── definitions.go │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ ├── prometheus-ccp-config-merger-test.go │ │ │ ├── prometheus-ccp-config-merger.go │ │ │ ├── tomlparser-ccp-default-scrape-settings.go │ │ │ ├── tomlparser-ccp-default-targets-metrics-keep-list.go │ │ │ └── tomlparser-ccp-prometheus-collector-settings.go │ │ └── mp │ │ │ ├── configmapparser.go │ │ │ ├── configmapparser_test.go │ │ │ ├── configmapsettings_suite_test.go │ │ │ ├── definitions.go │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ ├── prometheus-config-merger.go │ │ │ ├── testdata │ │ │ ├── collector-config-replicaset.yml │ │ │ ├── default-linux-ds.yaml │ │ │ └── default-linux-rs.yaml │ │ │ ├── tomlparser-debug-mode.go │ │ │ ├── tomlparser-default-scrape-settings.go │ │ │ ├── tomlparser-default-targets-metrics-keep-list.go │ │ │ ├── tomlparser-pod-annotation-based-scraping.go │ │ │ ├── tomlparser-pod-annotation-based-scraping_test.go │ │ │ ├── tomlparser-prometheus-collector-settings.go │ │ │ ├── tomlparser-scrape-interval.go │ │ │ ├── tomlparser-set-global-config.go │ │ │ └── tomlparser_debug_mode_test.go │ ├── file_utilities.go │ ├── filesystemwatcher_utility.go │ ├── go.mod │ ├── go.sum │ ├── helpers.go │ ├── logger_utility.go │ ├── otel_config.go │ ├── process_utilities_linux.go │ ├── process_utilities_windows.go │ ├── proxy_settings.go │ └── telemetry.go └── test │ ├── README.md │ ├── arc-conformance │ ├── Dockerfile │ ├── README.md │ ├── arc-conformance.yaml │ ├── e2e_tests.sh │ └── local-e2e-tests.yaml │ ├── ci-cd │ ├── ci-cd-cluster.json │ └── ci-cd-cluster.parameters.json │ ├── ginkgo-e2e │ ├── configprocessing │ │ ├── config_processing_test.go │ │ ├── go.mod │ │ ├── go.sum │ │ └── suite_test.go │ ├── containerstatus │ │ ├── container_status_test.go │ │ ├── go.mod │ │ ├── go.sum │ │ └── suite_test.go │ ├── livenessprobe │ │ ├── go.mod │ │ ├── go.sum │ │ ├── liveness_test.go │ │ └── suite_test.go │ ├── operator │ │ ├── go.mod │ │ ├── go.sum │ │ ├── operator_suite_test.go │ │ └── operator_test.go │ ├── prometheusui │ │ ├── go.mod │ │ ├── go.sum │ │ ├── prometheus_ui_test.go │ │ └── suite_test.go │ ├── querymetrics │ │ ├── go.mod │ │ ├── go.sum │ │ ├── query_metrics_test.go │ │ └── suite_test.go │ ├── regionTests │ │ ├── go.mod │ │ ├── go.sum │ │ └── regionTests_suite_test.go │ ├── update-go-packages.sh │ └── utils │ │ ├── amw_query_api_utils.go │ │ ├── constants.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── kubernetes_api_utils.go │ │ ├── operator_utils.go │ │ ├── prometheus_ui_api_utils.go │ │ └── setup_utils.go │ ├── test-cluster-yamls │ ├── configmaps │ │ ├── ama-metrics-prometheus-config-configmap.yaml │ │ ├── ama-metrics-prometheus-config-node-configmap.yaml │ │ ├── ama-metrics-prometheus-config-node-windows-configmap.yaml │ │ ├── ama-metrics-settings-configmap-v2.yaml │ │ ├── ama-metrics-settings-configmap.yaml │ │ ├── controlplane │ │ │ ├── 
ama-metrics-settings-configmap-mipfalse-emptykeep.yaml │ │ │ └── ama-metrics-settings-configmap-mipfalse-keepmetrics.yaml │ │ ├── custom-config-map-node │ │ │ └── ama-metrics-prometheus-config-node-configmap-errors.yaml │ │ ├── custom-config-map-win │ │ │ └── ama-metrics-prometheus-config-node-windows-configmap-errors.yaml │ │ ├── custom-config-map │ │ │ ├── ama-metrics-prometheus-config-configmap-all-actions.yaml │ │ │ └── ama-metrics-prometheus-config-configmap-with-error.yaml │ │ ├── default-config-map │ │ │ ├── ama-metrics-settings-configmap-all-targets-disabled.yaml │ │ │ ├── ama-metrics-settings-configmap-all-targets-enabled.yaml │ │ │ ├── ama-metrics-settings-configmap-defaults-targets-turned-on.yaml │ │ │ ├── ama-metrics-settings-configmap-ds-targets-enabled.yaml │ │ │ ├── ama-metrics-settings-configmap-error.yaml │ │ │ └── ama-metrics-settings-configmap-rs-targets-enabled.yaml │ │ └── global-settings │ │ │ ├── ama-metrics-prometheus-config-configmap-with-custom-global.yaml │ │ │ └── ama-metrics-prometheus-config-configmap-with-global-error.yaml │ └── customresources │ │ └── prometheus-reference-app.yaml │ └── testkube │ ├── api-server-permissions.yaml │ ├── config-processing-test-crs │ ├── testkube-config-test-all-ds-targets-enabled-crs.yaml │ ├── testkube-config-test-all-rs-targets-enabled-crs.yaml │ ├── testkube-config-test-all-targets-disabled-crs.yaml │ ├── testkube-config-test-all-targets-enabled-crs.yaml │ ├── testkube-config-test-custom-configmap-error-crs.yaml │ ├── testkube-config-test-custom-node-configmap-crs.yaml │ ├── testkube-config-test-default-targets-on-crs.yaml │ ├── testkube-config-test-global-ext-labels-error-crs.yaml │ ├── testkube-config-test-global-settings-crs.yaml │ ├── testkube-config-test-no-configmaps-crs.yaml │ ├── testkube-config-test-only-custom-configmap-crs.yaml │ └── testkube-config-test-settings-error-crs.yaml │ ├── job-template.yaml │ ├── testkube-test-crs-arc.yaml │ ├── testkube-test-crs-otel.yaml │ ├── testkube-test-crs.yaml │ └── values.yaml └── tools └── az-prom-rules-converter ├── .gitignore ├── README.md ├── dist ├── cli.d.ts ├── cli.js ├── index.d.ts ├── index.js ├── schemas │ ├── azure │ │ ├── azure-common-types.json │ │ └── azure-prometheus-rule-group.json │ └── prometheus │ │ └── prometheus.rules.json ├── steps │ ├── to-arm-template.d.ts │ ├── to-arm-template.js │ ├── to-azure │ │ ├── to-azure-prom-alerting-rule.d.ts │ │ ├── to-azure-prom-alerting-rule.js │ │ ├── to-azure-prom-recording-rule.d.ts │ │ ├── to-azure-prom-recording-rule.js │ │ ├── to-azure-prom-rule-group-properties.d.ts │ │ ├── to-azure-prom-rule-group-properties.js │ │ ├── to-azure-prom-rule-group.d.ts │ │ └── to-azure-prom-rule-group.js │ ├── validate-arm-template.d.ts │ ├── validate-arm-template.js │ ├── validate-input-not-empty.d.ts │ ├── validate-input-not-empty.js │ ├── validate-prom-schemas.d.ts │ ├── validate-prom-schemas.js │ ├── validations │ │ ├── validate-azure-prom-schema.d.ts │ │ └── validate-azure-prom-schema.js │ ├── yaml2json.d.ts │ └── yaml2json.js ├── types │ ├── prometheus-rules.d.ts │ ├── prometheus-rules.js │ ├── step-result.d.ts │ └── step-result.js └── utils │ ├── converter.d.ts │ ├── converter.js │ ├── prom-duration-to-iso8601.d.ts │ └── prom-duration-to-iso8601.js ├── examples ├── json-example1.json ├── result1.json ├── template-example.json └── yaml-example1.yml ├── jest.config.js ├── package-lock.json ├── package.json ├── src ├── cli.ts ├── index.test.ts ├── index.ts ├── schemas │ ├── azure │ │ ├── azure-common-types.json │ │ └── 
azure-prometheus-rule-group.json │ └── prometheus │ │ └── prometheus.rules.json ├── steps │ ├── to-arm-template.test.ts │ ├── to-arm-template.ts │ ├── to-azure │ │ ├── to-azure-prom-alerting-rule.test.ts │ │ ├── to-azure-prom-alerting-rule.ts │ │ ├── to-azure-prom-recording-rule.ts │ │ ├── to-azure-prom-rule-group-properties.test.ts │ │ ├── to-azure-prom-rule-group-properties.ts │ │ └── to-azure-prom-rule-group.ts │ ├── validate-arm-template.ts │ ├── validate-input-not-empty.ts │ ├── validate-prom-schemas.ts │ ├── validations │ │ ├── validate-azure-prom-schema.test.ts │ │ └── validate-azure-prom-schema.ts │ ├── yaml2json.test.ts │ └── yaml2json.ts ├── types │ ├── prometheus-rules.ts │ └── step-result.ts └── utils │ ├── converter.ts │ ├── prom-duration-to-iso8601.test.ts │ └── prom-duration-to-iso8601.ts └── tsconfig.json /.config/1espt/PipelineAutobaseliningConfig.yml: -------------------------------------------------------------------------------- 1 | ## DO NOT MODIFY THIS FILE MANUALLY. This is part of auto-baselining from 1ES Pipeline Templates. Go to [https://aka.ms/1espt-autobaselining] for more details. 2 | 3 | pipelines: 4 | 440: 5 | retail: 6 | source: 7 | credscan: 8 | lastModifiedDate: 2025-05-02 9 | eslint: 10 | lastModifiedDate: 2025-05-02 11 | psscriptanalyzer: 12 | lastModifiedDate: 2025-05-02 13 | armory: 14 | lastModifiedDate: 2025-05-02 15 | binary: 16 | credscan: 17 | lastModifiedDate: 2025-05-02 18 | binskim: 19 | lastModifiedDate: 2025-05-02 20 | spotbugs: 21 | lastModifiedDate: 2025-05-02 22 | -------------------------------------------------------------------------------- /.config/guardian/.gdnbaselines: -------------------------------------------------------------------------------- 1 | { 2 | "properties": { 3 | "helpUri": "https://eng.ms/docs/microsoft-security/security/azure-security/cloudai-security-fundamentals-engineering/security-integration/guardian-wiki/microsoft-guardian/general/baselines" 4 | }, 5 | "version": "1.0.0", 6 | "baselines": { 7 | "default": { 8 | "name": "default", 9 | "createdDate": "2025-05-02 19:41:47Z", 10 | "lastUpdatedDate": "2025-05-02 19:41:47Z" 11 | } 12 | }, 13 | "results": { 14 | "2bc159ebba334c55965b2ac505bdd4eb178d1a9ed6dc0abc640b7ffb7dcfafea": { 15 | "signature": "2bc159ebba334c55965b2ac505bdd4eb178d1a9ed6dc0abc640b7ffb7dcfafea", 16 | "alternativeSignatures": [ 17 | "a648663d93fe23ba9d5102c2de69f518f7b2fd568eb629b23cd60e6e9d017f9e" 18 | ], 19 | "target": "otelcollector/otel-allocator/internal/server/server_test.go", 20 | "line": 494, 21 | "memberOf": [ 22 | "default" 23 | ], 24 | "tool": "credscan", 25 | "ruleId": "CSCAN-GENERAL0060", 26 | "createdDate": "2025-05-02 19:45:07Z", 27 | "expirationDate": "2025-10-19 21:15:02Z", 28 | "justification": "This error is baselined with an expiration date of 180 days from 2025-05-02 21:15:02Z" 29 | } 30 | } 31 | } -------------------------------------------------------------------------------- /.github/workflows/scan-released-image.yml: -------------------------------------------------------------------------------- 1 | name: scan-last-released-image 2 | on: 3 | schedule: 4 | # At the end of every day 5 | - cron: "0 0 * * *" 6 | jobs: 7 | scan-image: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Run-trivy-scanner-on-last-released-docker-image 11 | uses: aquasecurity/trivy-action@master 12 | with: 13 | image-ref: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod/prometheus-collector/images:5.0.0-main-09-15-2022-c5d54419" 14 | format: 'table' 15 | severity: 'CRITICAL,HIGH' 16 | 
vuln-type: 'os,library' 17 | exit-code: '1' 18 | timeout: '5m0s' 19 | -------------------------------------------------------------------------------- /.github/workflows/scan.yml: -------------------------------------------------------------------------------- 1 | name: scan-any-public-image 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | imageToScan: 6 | description: 'Full image path in any public registry. ex;- mcr.microsoft.com/azuremonitor/containerinsights/cidev:cidevcfa804a1adeb3eb4e82d78f14569e3238e2f6dbd' 7 | required: true 8 | jobs: 9 | scan-image: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Set-workflow-initiator 13 | run: echo "Initiated by - ${GITHUB_ACTOR}" 14 | - name: Run-trivy-scanner-on-docker-image 15 | uses: aquasecurity/trivy-action@master 16 | with: 17 | image-ref: "${{ github.event.inputs.imageToScan }}" 18 | format: 'table' 19 | #ignore format 20 | #output: 'trivy-results.sarif' 21 | severity: 'CRITICAL,HIGH' 22 | vuln-type: 'os,library' 23 | #skip-dirs: '/opt/telegraf,/opt/microsoft/otelcollector' 24 | exit-code: '1' 25 | timeout: '5m0s' 26 | -------------------------------------------------------------------------------- /.github/workflows/size.yml: -------------------------------------------------------------------------------- 1 | name: size-label #https://github.com/kubernetes/kubernetes/labels?q=size 2 | on: 3 | pull_request: 4 | branches: 5 | - main 6 | jobs: 7 | size-label: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: read 11 | pull-requests: write 12 | steps: 13 | - name: size-label 14 | uses: "pascalgn/size-label-action@v0.5.5" 15 | env: 16 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 17 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Mark stale issues and pull requests 2 | 3 | on: 4 | schedule: 5 | - cron: "30 10 * * *" 6 | 7 | jobs: 8 | stale: 9 | 10 | runs-on: ubuntu-latest 11 | permissions: 12 | issues: write 13 | pull-requests: write 14 | 15 | steps: 16 | - uses: actions/stale@v9 17 | with: 18 | repo-token: ${{ secrets.GITHUB_TOKEN }} 19 | days-before-issue-stale: 7 20 | days-before-pr-stale: 7 21 | stale-issue-message: 'This issue is stale because it has been open 7 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 22 | stale-pr-message: 'This PR is stale because it has been open 7 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 23 | close-issue-message: 'This issue was closed because it has been stalled for 12 days with no activity.' 24 | close-pr-message: 'This PR was closed because it has been stalled for 12 days with no activity.' 
25 | days-before-issue-close: 5 26 | days-before-pr-close: 5 27 | stale-issue-label: 'no-issue-activity' 28 | stale-pr-label: 'no-pr-activity' 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | *.tgz 8 | #otelcollector 9 | promconfigvalidator 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/prometheus-collector/1ab1718ac5543b703f93c5ff6e7bdde0c633164e/.gitmodules -------------------------------------------------------------------------------- /.pipelines/azure-pipeline-aksdeploy-test.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - none 3 | 4 | pool: 5 | vmImage: windows-latest 6 | 7 | stages: 8 | - stage: Deploy 9 | jobs: 10 | - job: DeployJob 11 | steps: 12 | - template: azure-template-aksdeploy.yml 13 | parameters: 14 | azureSubscription: $(AZURESUBSCRIPTION) 15 | resourceGroup: $(RESOURCE-GROUP) 16 | deployParameters: ${env:PARAMETERS} 17 | - task: PowerShell@2 18 | displayName: "Wait" 19 | inputs: 20 | targetType: 'inline' 21 | script: | 22 | $sleepTime = ${env:SLEEPTIME_IN_SECONDS} 23 | Get-Date 24 | Write-host "Allowing cluster to run $sleepTime seconds before testing..." 25 | Sleep -Seconds $sleepTime 26 | 27 | - stage: Test 28 | dependsOn: Deploy 29 | jobs: 30 | - job: TestJob 31 | steps: 32 | - template: azure-template-regionstest.yml 33 | parameters: 34 | azureSubscription: $(AZURESUBSCRIPTION) 35 | resourceGroup: $(RESOURCE-GROUP) 36 | clusterName: $(CLUSTERNAME) 37 | -------------------------------------------------------------------------------- /.pipelines/azure-pipeline-aksdeploy.yml: -------------------------------------------------------------------------------- 1 | # Starter pipeline 2 | # Start with a minimal pipeline that you can customize to build and deploy your code. 3 | # Add steps that build, run tests, deploy, and more: 4 | # https://aka.ms/yaml 5 | 6 | trigger: 7 | - none 8 | 9 | pool: 10 | vmImage: windows-latest 11 | 12 | steps: 13 | - template: azure-template-aksdeploy.yml 14 | parameters: 15 | azureSubscription: $(AZURESUBSCRIPTION) 16 | resourceGroup: $(RESOURCE-GROUP) 17 | deployParameters: ${env:PARAMETERS} -------------------------------------------------------------------------------- /.pipelines/azure-pipeline-regionstest.yml: -------------------------------------------------------------------------------- 1 | # Starter pipeline 2 | # Start with a minimal pipeline that you can customize to build and deploy your code. 
3 | # Add steps that build, run tests, deploy, and more: 4 | # https://aka.ms/yaml 5 | 6 | trigger: 7 | - none 8 | 9 | pool: 10 | vmImage: windows-latest 11 | 12 | steps: 13 | - template: azure-template-regionstest.yml 14 | parameters: 15 | azureSubscription: $(AZURESUBSCRIPTION) 16 | resourceGroup: $(RESOURCE-GROUP) 17 | clusterName: $(CLUSTERNAME) 18 | 19 | -------------------------------------------------------------------------------- /.pipelines/azure-template-aksdeploy.yml: -------------------------------------------------------------------------------- 1 | parameters: 2 | - name: azureSubscription 3 | type: string 4 | default: 'ContainerInsights_Dev_Grace' 5 | - name: resourceGroup 6 | type: string 7 | default: 'rg' 8 | - name: deployParameters 9 | type: string 10 | 11 | steps: 12 | - task: AzureCLI@2 13 | displayName: "Deploy AKS cluster" 14 | inputs: 15 | azureSubscription: ${{ parameters.azureSubscription }} 16 | scriptType: ps 17 | scriptLocation: inlineScript 18 | inlineScript: |+ 19 | az --version 20 | az account show 21 | az deployment group create ` 22 | --resource-group ${{ parameters.resourceGroup }} ` 23 | --name ClusterDeployment ` 24 | --template-file ".\otelcollector\test\ci-cd\ci-cd-cluster.json" ` 25 | --parameters ${{ parameters.deployParameters }} 26 | 27 | -------------------------------------------------------------------------------- /.pipelines/cgmanifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "Registrations":[ 3 | { 4 | "component": { 5 | "type": "git", 6 | "git": { 7 | "repositoryUrl": "https://github.com/fluent/fluent-bit", 8 | "commitHash": "82f03f8fb7198c82e1ad320d4360b30a8fecac23" 9 | } 10 | } 11 | }, 12 | { 13 | "component": { 14 | "type": "git", 15 | "git": { 16 | "repositoryUrl": "https://github.com/influxdata/telegraf", 17 | "commitHash": "a60db9ba2fd3260a99856d52e40850ef79bb2778" 18 | } 19 | } 20 | }, 21 | { 22 | "component": { 23 | "type": "git", 24 | "git": { 25 | "repositoryUrl": "https://github.com/prometheus/prometheus", 26 | "commitHash": "411021ada9ab41095923b8d2df9365b632fd40c3" 27 | } 28 | } 29 | } 30 | ], 31 | "Version": 1 32 | } 33 | -------------------------------------------------------------------------------- /.pipelines/deployment/ServiceGroupRoot/buildver.txt: -------------------------------------------------------------------------------- 1 | 0.0.1 -------------------------------------------------------------------------------- /.pipelines/deployment/arc-extension-release/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", 3 | "ContentVersion": "1.0.0.0", 4 | "RolloutMetadata": { 5 | "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", 6 | "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", 7 | "Name": "PrometheusCollectorExtension-Stable", 8 | "RolloutType": "Major", 9 | "BuildSource": { 10 | "Parameters": { 11 | "VersionFile": "buildver.txt" 12 | } 13 | }, 14 | "notification": { 15 | "email": { 16 | "to": "ciprometheus@microsoft.com" 17 | } 18 | } 19 | }, 20 | "orchestratedSteps": [ 21 | { 22 | "name": "ArcExtensionRelease", 23 | "targetType": "ServiceResource", 24 | "targetName": "ArcExtensionRelease", 25 | "actions": [ "Shell/ArcExtensionRelease" ], 26 | "dependsOn": [ ] 27 | } 28 | ] 29 | } -------------------------------------------------------------------------------- 
/.pipelines/deployment/arc-extension-release/ServiceGroupRoot/buildver.txt: -------------------------------------------------------------------------------- 1 | 0.0.1 -------------------------------------------------------------------------------- /.trivyignore: -------------------------------------------------------------------------------- 1 | # HIGH 2 | # targetallocator 3 | CVE-2025-22869 # golang.org/x/crypto 4 | CVE-2025-30204 # github.com/golang-jwt/jwt/v5 5 | # MEDIUM 6 | # otelcollector 7 | CVE-2025-22872 # golang.org/x/net 8 | # promconfigvalidator 9 | CVE-2025-22872 # golang.org/x/net 10 | # prometheusui 11 | CVE-2025-22872 # golang.org/x/net 12 | # targetallocator 13 | CVE-2025-22870 # golang.org/x/net -------------------------------------------------------------------------------- /AddonArmTemplate/FullAzureMonitorMetricsProfileParameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "azureMonitorWorkspaceResourceId": { 6 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/microsoft.monitor/accounts/{amw_name}" 7 | }, 8 | "azureMonitorWorkspaceLocation": { 9 | "value": "{amwLocation}" 10 | }, 11 | "clusterResourceId": { 12 | "value": "/subscriptions/{sub_id}/resourcegroups/{rg_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}" 13 | }, 14 | "clusterLocation": { 15 | "value": "{clusterLocation}" 16 | }, 17 | "metricLabelsAllowlist": { 18 | "value": "" 19 | }, 20 | "metricAnnotationsAllowList": { 21 | "value": "" 22 | }, 23 | "enableWindowsRecordingRules": { 24 | "value": false 25 | }, 26 | "grafanaResourceId": { 27 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/Microsoft.Dashboard/grafana/{grafana_name}" 28 | }, 29 | "grafanaLocation": { 30 | "value": "{grafanaLocation}" 31 | }, 32 | "grafanaSku": { 33 | "value": "Standard" 34 | } 35 | } 36 | } -------------------------------------------------------------------------------- /AddonArmTemplate/README.md: -------------------------------------------------------------------------------- 1 | You can deploy the templates using a command like the following (a fully substituted example is shown after the notes below): 2 | 3 | ```az deployment group create -g <resource-group-name> -n <deployment-name> --template-file .\FullAzureMonitorMetricsProfile.json --parameters .\FullAzureMonitorMetricsProfileParameters.json``` 4 | 5 | **NOTE** 6 | 7 | - Please edit the FullAzureMonitorMetricsProfileParameters.json file appropriately before running the ARM template 8 | - Users with the 'User Access Administrator' role in the subscription of the AKS cluster can assign the 'Monitoring Data Reader' role directly by deploying the template. 9 | - Please add any existing azureMonitorWorkspaceIntegrations values to the Grafana resource before running the template; otherwise, the older values will be deleted and replaced with what is in the template at the time of deployment. 10 | - Please edit the grafanaSku parameter if you are using a non-standard SKU. 11 | - Please run this template in the resource group of the Grafana resource.
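For reference, a fully substituted command could look like the sketch below; the resource group and deployment name are hypothetical placeholders rather than values from this repository, and the paths assume you run the command from the AddonArmTemplate folder after editing the parameters file.

```shell
# Hypothetical example values: replace the resource group and deployment name with your own.
az deployment group create \
  --resource-group my-monitoring-rg \
  --name azuremonitormetrics-deployment \
  --template-file ./FullAzureMonitorMetricsProfile.json \
  --parameters ./FullAzureMonitorMetricsProfileParameters.json
```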
12 | -------------------------------------------------------------------------------- /AddonArmTemplate/WindowsRecordingRuleGroupTemplate/WindowsRecordingRulesParameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "azureMonitorWorkspaceResourceId": { 6 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/microsoft.monitor/accounts/{amw_name}" 7 | }, 8 | "azureMonitorWorkspaceLocation": { 9 | "value": "{amwLocation}" 10 | }, 11 | "clusterResourceId": { 12 | "value": "/subscriptions/{sub_id}/resourcegroups/{rg_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /AddonBicepTemplate/nested_azuremonitormetrics_dcra_clusterResourceId.bicep: -------------------------------------------------------------------------------- 1 | param resourceId_Microsoft_Insights_dataCollectionRules_variables_dcrName string 2 | param variables_clusterName string 3 | param variables_dcraName string 4 | param clusterLocation string 5 | 6 | #disable-next-line BCP174 // This warning is a false positive as dcra is already 'scope'-ed to the resource group in main template 7 | resource variables_clusterName_microsoft_insights_variables_dcra 'Microsoft.ContainerService/managedClusters/providers/dataCollectionRuleAssociations@2022-06-01' = { 8 | name: '${variables_clusterName}/microsoft.insights/${variables_dcraName}' 9 | location: clusterLocation 10 | properties: { 11 | description: 'Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.' 
12 | dataCollectionRuleId: resourceId_Microsoft_Insights_dataCollectionRules_variables_dcrName 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /AddonBicepTemplate/nested_azuremonitormetrics_profile_clusterResourceId.bicep: -------------------------------------------------------------------------------- 1 | param variables_clusterName string 2 | param clusterLocation string 3 | param metricLabelsAllowlist string 4 | param metricAnnotationsAllowList string 5 | 6 | resource variables_cluster 'Microsoft.ContainerService/managedClusters@2023-01-01' = { 7 | name: variables_clusterName 8 | location: clusterLocation 9 | properties: { 10 | azureMonitorProfile: { 11 | metrics: { 12 | enabled: true 13 | kubeStateMetrics: { 14 | metricLabelsAllowlist: metricLabelsAllowlist 15 | metricAnnotationsAllowList: metricAnnotationsAllowList 16 | } 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /AddonBicepTemplate/nested_grafana_amw_role_assignment.bicep: -------------------------------------------------------------------------------- 1 | param grafanaPrincipalId string 2 | param azureMonitorWorkspaceSubscriptionId string 3 | 4 | @description('A new GUID used to identify the role assignment for Grafana') 5 | param roleNameGuid string = newGuid() 6 | 7 | resource roleAssignmentLocal 'Microsoft.Authorization/roleAssignments@2022-04-01' = { 8 | name: roleNameGuid 9 | properties: { 10 | roleDefinitionId: '/subscriptions/${azureMonitorWorkspaceSubscriptionId}/providers/Microsoft.Authorization/roleDefinitions/b0d8363b-8ddd-447d-831f-62ca05bff136' 11 | principalId: grafanaPrincipalId 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /AddonBicepTemplate/recommendedMetricAlertsProfileParameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "monitorWorkspaceName": { 6 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/microsoft.monitor/accounts/{amw_name}" 7 | }, 8 | "location": { 9 | "value": "{amwLocation}" 10 | }, 11 | "aksResourceId": { 12 | "value": "/subscriptions/{sub_id}/resourcegroups/{rg_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}" 13 | }, 14 | "actionGroupResourceId": { 15 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/microsoft.insights/actionGroups/{testGroupName}" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /AddonTerraformTemplate/README.md: -------------------------------------------------------------------------------- 1 | If you are deploying a new AKS cluster using Terraform with the managed Prometheus addon enabled, follow the steps below. 2 | 3 | 1. Please download all files under AddonTerraformTemplate. 4 | 2. Run `terraform init -upgrade` to initialize the Terraform deployment. 5 | 3. Run `terraform plan -out main.tfplan` to create an execution plan. 6 | 4. Run `terraform apply main.tfplan` to apply the execution plan to your cloud infrastructure. 7 | 8 | 9 | Note: Pass the variables for the `annotations_allowed` and `labels_allowed` keys only when those values exist. These are optional blocks (see the sketch below).
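As an illustration of what those optional values map to, here is a minimal sketch of the `monitor_metrics` block on the `azurerm_kubernetes_cluster` resource (outputs.tf refers to the cluster as `azurerm_kubernetes_cluster.k8s`). The variable names below are assumptions for illustration only; the actual wiring lives in main.tf and variables.tf and may differ.

```hcl
# Sketch only: required cluster arguments (name, location, default_node_pool,
# identity, ...) are omitted, and the two variable names are assumed.
resource "azurerm_kubernetes_cluster" "k8s" {
  # ... other arguments as defined in main.tf ...

  # Enables the Azure Monitor managed Prometheus metrics addon.
  monitor_metrics {
    # Optional: set these only when you actually have values for them.
    annotations_allowed = var.metric_annotations_allowlist
    labels_allowed      = var.metric_labels_allowlist
  }
}
```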
10 | 11 | **NOTE** 12 | - Please edit the main.tf file appropriately before running the terraform template 13 | - Please add in any existing azure_monitor_workspace_integrations values to the grafana resource before running the template otherwise the older values will get deleted and replaced with what is there in the template at the time of deployment 14 | - Users with 'User Access Administrator' role in the subscription of the AKS cluster can be able to enable 'Monitoring Data Reader' role directly by deploying the template. 15 | - Please edit the grafanaSku parameter if you are using a non standard SKU. 16 | - Please run this template in the Grafana Resources RG. 17 | -------------------------------------------------------------------------------- /AddonTerraformTemplate/outputs.tf: -------------------------------------------------------------------------------- 1 | output "client_certificate" { 2 | value = azurerm_kubernetes_cluster.k8s.kube_config[0].client_certificate 3 | sensitive = true 4 | } 5 | 6 | output "client_key" { 7 | value = azurerm_kubernetes_cluster.k8s.kube_config[0].client_key 8 | sensitive = true 9 | } 10 | 11 | output "cluster_ca_certificate" { 12 | value = azurerm_kubernetes_cluster.k8s.kube_config[0].cluster_ca_certificate 13 | sensitive = true 14 | } 15 | 16 | output "cluster_password" { 17 | value = azurerm_kubernetes_cluster.k8s.kube_config[0].password 18 | sensitive = true 19 | } 20 | 21 | output "cluster_username" { 22 | value = azurerm_kubernetes_cluster.k8s.kube_config[0].username 23 | sensitive = true 24 | } 25 | 26 | output "host" { 27 | value = azurerm_kubernetes_cluster.k8s.kube_config[0].host 28 | sensitive = true 29 | } 30 | 31 | output "kube_config" { 32 | value = azurerm_kubernetes_cluster.k8s.kube_config_raw 33 | sensitive = true 34 | } 35 | 36 | output "resource_group_name" { 37 | value = azurerm_resource_group.rg.name 38 | } 39 | -------------------------------------------------------------------------------- /AddonTerraformTemplate/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | version = "~>3.0" 6 | } 7 | } 8 | } 9 | 10 | provider "azurerm" { 11 | features {} 12 | } 13 | -------------------------------------------------------------------------------- /ArcArmTemplate/FullAzureMonitorMetricsProfileParameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "azureMonitorWorkspaceResourceId": { 6 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/microsoft.monitor/accounts/{amw_name}" 7 | }, 8 | "azureMonitorWorkspaceLocation": { 9 | "value": "{amwLocation}" 10 | }, 11 | "clusterResourceId": { 12 | "value": "/subscriptions/{sub_id}/resourcegroups/{rg_name}/providers/Microsoft.Kubernetes/connectedClusters/{cluster_name}" 13 | }, 14 | "clusterLocation": { 15 | "value": "{clusterLocation}" 16 | }, 17 | "metricLabelsAllowlist": { 18 | "value": "" 19 | }, 20 | "metricAnnotationsAllowList": { 21 | "value": "" 22 | }, 23 | "grafanaResourceId": { 24 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/Microsoft.Dashboard/grafana/{grafana_instance_name}" 25 | }, 26 | "grafanaLocation": { 27 | "value": "{grafanaLocation}" 28 | }, 29 | "grafanaSku": { 30 | "value": "Standard" 31 | } 32 | 
} 33 | } 34 | -------------------------------------------------------------------------------- /ArcBicepTemplate/FullAzureMonitorMetricsProfileParameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "azureMonitorWorkspaceResourceId": { 6 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/microsoft.monitor/accounts/{cluster_name}" 7 | }, 8 | "azureMonitorWorkspaceLocation": { 9 | "value": "{amwLocation}" 10 | }, 11 | "clusterResourceId": { 12 | "value": "/subscriptions/{sub_id}/resourcegroups/{rg_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}" 13 | }, 14 | "clusterLocation": { 15 | "value": "{clusterLocation}" 16 | }, 17 | "metricLabelsAllowlist": { 18 | "value": "" 19 | }, 20 | "metricAnnotationsAllowList": { 21 | "value": "" 22 | }, 23 | "grafanaResourceId": { 24 | "value": "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/Microsoft.Dashboard/grafana/{cluster_name}" 25 | }, 26 | "grafanaLocation": { 27 | "value": "{grafanaLocation}" 28 | }, 29 | "grafanaSku": { 30 | "value": "Standard" 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /ArcBicepTemplate/nested_azuremonitormetrics_arc_k8s_extension_clusterResourceId.bicep: -------------------------------------------------------------------------------- 1 | param clusterLocation string 2 | 3 | @description('Resource Id of the Azure Arc Connected Cluster') 4 | param clusterResourceId string 5 | 6 | resource aksCluster 'Microsoft.Kubernetes/connectedClusters@2022-10-01-preview' existing = { 7 | name: split(clusterResourceId, '/')[8] 8 | } 9 | 10 | resource azuremonitor_metrics 'Microsoft.KubernetesConfiguration/extensions@2021-09-01' = { 11 | scope: aksCluster 12 | name: 'azuremonitor-metrics' 13 | location: clusterLocation 14 | identity: { 15 | type: 'SystemAssigned' 16 | } 17 | properties: { 18 | extensionType: 'Microsoft.AzureMonitor.Containers.Metrics' 19 | configurationSettings: { 20 | } 21 | configurationProtectedSettings: { 22 | } 23 | autoUpgradeMinorVersion: true 24 | releaseTrain: 'Dev' 25 | scope: { 26 | cluster: { 27 | releaseNamespace: 'kube-system' 28 | } 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /ArcBicepTemplate/nested_azuremonitormetrics_dcra_clusterResourceId.bicep: -------------------------------------------------------------------------------- 1 | param resourceId_Microsoft_Insights_dataCollectionRules_variables_dcrName string 2 | param variables_clusterName string 3 | param variables_dcraName string 4 | param clusterLocation string 5 | 6 | resource variables_clusterName_microsoft_insights_variables_dcra 'Microsoft.Kubernetes/connectedClusters/providers/dataCollectionRuleAssociations@2021-09-01-preview' = { 7 | name: '${variables_clusterName}/microsoft.insights/${variables_dcraName}' 8 | location: clusterLocation 9 | properties: { 10 | description: 'Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.' 
11 | dataCollectionRuleId: resourceId_Microsoft_Insights_dataCollectionRules_variables_dcrName 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/AMW.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/prometheus-collector/1ab1718ac5543b703f93c5ff6e7bdde0c633164e/Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/AMW.png -------------------------------------------------------------------------------- /Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/CustomDCE.parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "dataCollectionEndpointName": { 6 | "value": "" // TODO: Fill in parameter value 7 | }, 8 | "location": { 9 | "value": "" // TODO: Fill in parameter value 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/CustomDCR.parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "dataCollectionRulesName": { 6 | "value": "" 7 | }, 8 | "dataCollectionEndpointsResourceId": { 9 | "value": "" 10 | }, 11 | "azureMonitorWorkspaceResourceId": { 12 | "value": "" 13 | }, 14 | "azureMonitorWorkspaceAccountId": { 15 | "value": "" 16 | }, 17 | "azureMonitorWorkspaceName": { 18 | "value": "" 19 | }, 20 | "location": { 21 | "value": "" 22 | } 23 | } 24 | } -------------------------------------------------------------------------------- /Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/DCE_Overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/prometheus-collector/1ab1718ac5543b703f93c5ff6e7bdde0c633164e/Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/DCE_Overview.png -------------------------------------------------------------------------------- /Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/DCR_JSON.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/prometheus-collector/1ab1718ac5543b703f93c5ff6e7bdde0c633164e/Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/DCR_JSON.png -------------------------------------------------------------------------------- /Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/DCR_Overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/prometheus-collector/1ab1718ac5543b703f93c5ff6e7bdde0c633164e/Azure-ARM-templates/Prometheus-RemoteWrite-DCR-artifacts/DCR_Overview.png -------------------------------------------------------------------------------- /Azure-ARM-templates/Workload-Rules/Alert-Rules-Parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "clusterName": { 6 | "value": "" 7 | }, 8 | 
"actionGroupId": { 9 | "value": "/subscriptions//resourcegroups//providers/microsoft.insights/actiongroups/" 10 | }, 11 | "azureMonitorWorkspace": { 12 | "value": "/subscriptions//resourcegroups//providers/microsoft.monitor/accounts/" 13 | }, 14 | "location": { 15 | "value": "" 16 | } 17 | } 18 | } -------------------------------------------------------------------------------- /Azure-ARM-templates/Workload-Rules/Recording-Rules-Parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "clusterName": { 6 | "value": "" 7 | }, 8 | "azureMonitorWorkspace": { 9 | "value": "/subscriptions//resourcegroups//providers/microsoft.monitor/accounts/" 10 | }, 11 | "location": { 12 | "value": "" 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Lines starting with '#' are comments. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # These owners will be the default owners for everything in the repo. 5 | * @azure/prometheus-collector-devs 6 | 7 | # Order is important. The last matching pattern has the most precedence. 8 | # So for example below (commented out) if a pull request only touches javascript files, only these owners 9 | # will be requested to review. 10 | #*.js @octocat @github/js 11 | 12 | # You can also use email addresses if you prefer. 13 | #docs/* docs@example.com 14 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /GeneratedMonitoringArtifacts/Default dashboards & metrics & rr lists.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/prometheus-collector/1ab1718ac5543b703f93c5ff6e7bdde0c633164e/GeneratedMonitoringArtifacts/Default dashboards & metrics & rr lists.docx -------------------------------------------------------------------------------- /GeneratedMonitoringArtifacts/DefaultAlertsList.txt: -------------------------------------------------------------------------------- 1 | KubeJobNotCompleted 2 | KubeJobFailed 3 | KubePodCrashLooping 4 | KubePodNotReadyByController 5 | KubeDeploymentReplicasMismatch 6 | KubeStatefulSetReplicasMismatch 7 | KubeHpaReplicasMismatch 8 | KubeHpaMaxedOut 9 | KubeQuotaAlmostFull 10 | #CPUThrottlingHigh -> Disabled for now as false positives and noisy 11 | KubeMemoryQuotaOvercommit 12 | KubeCPUQuotaOvercommit 13 | KubeVersionMismatch 14 | KubeNodeNotReady 15 | KubeNodeReadinessFlapping 16 | KubeletTooManyPods 17 | KubeNodeUnreachable 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Azure Monitor managed service for Prometheus - Agent 4 | 5 | Copyright (c) Microsoft Corporation. 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy 8 | of this software and associated documentation files (the "Software"), to deal 9 | in the Software without restriction, including without limitation the rights 10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the Software is 12 | furnished to do so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in all 15 | copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | SOFTWARE 24 | -------------------------------------------------------------------------------- /REMOTE-WRITE-RELEASENOTES.md: -------------------------------------------------------------------------------- 1 | # Azure Monitor managed service for Prometheus remote write 2 | 3 | ## Release 03-26-2025 4 | * Image - `mcr.microsoft.com/azuremonitor/containerinsights/ciprod/prometheus-remote-write/images:prom-remotewrite-20250326.1` 5 | * Change log - 6 | * CVE fixes 7 | - CVE-2025-22870 8 | - CVE-2025-30204 9 | 10 | ## Release 02-14-2025 11 | * Image - `mcr.microsoft.com/azuremonitor/containerinsights/ciprod/prometheus-remote-write/images:prom-remotewrite-20250214.1` 12 | * Change log - 13 | * CVE fixes 14 | - CVE-2024-45339 15 | - CVE-2019-11254 16 | * golang upgrade - 1.22.5 -> 1.23.6 17 | 18 | ## Release 01-06-2025 19 | * Image - `mcr.microsoft.com/azuremonitor/containerinsights/ciprod/prometheus-remote-write/images:prom-remotewrite-20250106.1` 20 | * Change log - 21 | * CVE fixes 22 | 23 | ## Release 06-17-2024 24 | * Image - `mcr.microsoft.com/azuremonitor/containerinsights/ciprod/prometheus-remote-write/images:prom-remotewrite-20240617.1` 25 | * Change log - 26 | * CVE fixes 27 | * golang update from 1.21.9 to 1.22.4 28 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | 2 | # Support 3 | 4 | ## How to file issues and get help 5 | 6 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 7 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 8 | feature request as a new Issue. 9 | 10 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 11 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 12 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 13 | 14 | ## Microsoft Support Policy 15 | 16 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
17 | -------------------------------------------------------------------------------- /internal/docs/CONFIGPROCESSINGTESTS.md: -------------------------------------------------------------------------------- 1 | Following are some of the test cases to run through while making config processing changes 2 | 3 | | Default config | Custom config | 4 | | ----------------------- |:-------------:| 5 | | All default targets enabled | Valid custom configmap | 6 | | All default targets enabled | No custom configmap | 7 | | No default targets enabled | Valid custom configmap | 8 | | No default targets enabled | No custom configmap | 9 | | All or some default targets enabled | Invalid custom configmap | 10 | | No default targets enabled | Invalid custom configmap | 11 | 12 | **Test all of the above in simple and advanced mode** -------------------------------------------------------------------------------- /internal/docs/Telemetry.md: -------------------------------------------------------------------------------- 1 | Application Insights telemetry resources : 2 | 3 | 4 | | Cloud | Subscription | Resource Group | Name | 5 | |-------------------|--------------|-----------------------------------------------|-------------------------------------------| 6 | | AzureCloud | LA_ContainerInsights_Monitoring_USEast_Prod_02 | ContainerInsightsPrometheusCollector-Prod | ContainerInsightsPrometheusCollector-Prod | 7 | | AzureChinaCloud | LA_ContainerInsights_INFRAINSIGHTS_MoonCake_PROD_00 | ContainerInsightsPrometheusCollector-Mooncake | ContainerInsightsPrometheusCollector-Mooncake | 8 | | AzureUSGovernment | LA_ContainerInsights_ContainerInsights_Fairfax_PROD_00 | ContainerInsightsPrometheusCollector-Fairfax | ContainerInsightsPrometheusCollector-Fairfax | 9 | | ussec | N/A | N/A | ContainerInsightsPrometheusCollector-USSec | 10 | | usnat | N/A | N/A | ContainerInsightsPrometheusCollector-USNat | 11 | -------------------------------------------------------------------------------- /internal/grafana_uami/action.ps1: -------------------------------------------------------------------------------- 1 | # ARMClient doc: https://github.com/projectkudu/ARMClient 2 | # ARMClient login 3 | 4 | $grafanaResourceId="/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/Microsoft.Dashboard/grafana/{name}" 5 | $grafanaApiVersion="2023-10-01-preview" 6 | 7 | armclient get "$($grafanaResourceId)?api-version=$($grafanaApiVersion)" 8 | 9 | Write-Output "Add user-assigned managed identity to Grafana" 10 | armclient patch "$($grafanaResourceId)?api-version=$($grafanaApiVersion)" patch-add-umi.json -verbose 11 | -------------------------------------------------------------------------------- /internal/grafana_uami/patch-add-umi.json: -------------------------------------------------------------------------------- 1 | { 2 | "identity": { 3 | "type": "UserAssigned", 4 | "userAssignedIdentities": { 5 | "/subscriptions/{sub_id}/resourceGroups/{rg_name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{name}": {} 6 | } 7 | } 8 | } -------------------------------------------------------------------------------- /internal/referenceapp/golang/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/vishiy/opentelemetry-collector-builder 2 | 3 | go 1.14 4 | 5 | require github.com/prometheus/client_golang v1.11.1 6 | -------------------------------------------------------------------------------- /internal/referenceapp/golang/linux/Dockerfile: 
-------------------------------------------------------------------------------- 1 | 2 | ARG GOLANG_VERSION 3 | FROM mcr.microsoft.com/oss/go/microsoft/golang:${GOLANG_VERSION} as builder 4 | 5 | # Set necessary environment variables needed for our image 6 | ENV GO111MODULE=on \ 7 | CGO_ENABLED=0 \ 8 | GOOS=linux \ 9 | GOARCH=amd64 10 | 11 | # Move to working directory /build 12 | WORKDIR /build 13 | 14 | # Copy and download dependency using go mod 15 | COPY go.mod . 16 | COPY go.sum . 17 | #RUN tdnf install -y golang-${GOLANG_VERSION} ca-certificates 18 | RUN go mod download 19 | 20 | # COPY client-cert.pem /etc/prometheus/certs/ 21 | # COPY client-key.pem /etc/prometheus/certs/ 22 | 23 | # Copy the code into the container 24 | COPY . . 25 | 26 | # Build the application 27 | RUN go build -o main . 28 | 29 | # Move to /dist directory as the place for resulting binary folder 30 | WORKDIR /dist 31 | 32 | # Copy binary from build to main folder 33 | RUN cp /build/main . 34 | 35 | FROM mcr.microsoft.com/cbl-mariner/distroless/base:2.0 36 | 37 | # Copy the binary from the builder stage 38 | COPY --from=builder /dist/main /dist/main 39 | 40 | # Export necessary ports 41 | EXPOSE 2112 2113 42 | 43 | # Command to run when starting the container 44 | CMD ["/dist/main"] 45 | -------------------------------------------------------------------------------- /internal/referenceapp/golang/windows/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/oss/go/microsoft/golang:1.20.8-1-nanoserver-ltsc2022-amd64 2 | 3 | # Set necessary environment variables needed for our image 4 | ENV GO111MODULE=on \ 5 | CGO_ENABLED=0 \ 6 | GOOS=windows \ 7 | GOARCH=amd64 8 | 9 | # Move to working directory /build 10 | WORKDIR /build 11 | 12 | # Copy and download dependency using go mod 13 | COPY go.mod . 14 | COPY go.sum . 15 | #RUN go mod download 16 | 17 | # Copy the code into the container 18 | COPY . . 19 | 20 | # Build the application 21 | RUN go build -o /dist/main.exe .
22 | 23 | # Move to /dist directory as the place for resulting binary folder 24 | #WORKDIR /dist 25 | 26 | # Copy binary from build to main folder 27 | #RUN copy "/build/main.exe" "/dist/main.exe" 28 | 29 | # Export necessary ports 30 | EXPOSE 2112 2113 31 | 32 | # Command to run when starting the container 33 | CMD ["/dist/main.exe"] 34 | -------------------------------------------------------------------------------- /internal/referenceapp/linux-http-scrape-config.yaml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: prometheus_ref_app_1 3 | scheme: http 4 | scrape_interval: 60s 5 | kubernetes_sd_configs: 6 | - role: pod 7 | relabel_configs: 8 | - source_labels: [__meta_kubernetes_pod_label_app] 9 | action: keep 10 | regex: "prometheus-reference-app" 11 | - source_labels: [__address__] 12 | action: replace 13 | target_label: __param_target 14 | regex: ":2113" 15 | - source_labels: [__param_target] 16 | action: keep 17 | regex: "2113" 18 | - action: drop 19 | -------------------------------------------------------------------------------- /internal/referenceapp/linux-https-scrape-config.yaml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: prometheus_ref_app 3 | scheme: https 4 | scrape_interval: 60s 5 | tls_config: 6 | ca_file: /etc/prometheus/certs/client-cert.pem 7 | cert_file: /etc/prometheus/certs/client-cert.pem 8 | key_file: /etc/prometheus/certs/client-key.pem 9 | insecure_skip_verify: false 10 | kubernetes_sd_configs: 11 | - role: pod 12 | relabel_configs: 13 | - source_labels: [__meta_kubernetes_pod_label_app] 14 | action: keep 15 | regex: "prometheus-reference-app" 16 | -------------------------------------------------------------------------------- /internal/referenceapp/linux-scrape-config.yaml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: prometheus_ref_app 3 | scheme: http 4 | scrape_interval: 60s 5 | kubernetes_sd_configs: 6 | - role: pod 7 | relabel_configs: 8 | - source_labels: [__meta_kubernetes_pod_label_app] 9 | action: keep 10 | regex: "prometheus-reference-app" 11 | -------------------------------------------------------------------------------- /internal/referenceapp/prometheus-config: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: prometheus_ref_app 3 | scheme: http 4 | scrape_interval: 60s 5 | tls_config: 6 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca/ca-cert.pem 7 | cert_file: /var/run/secrets/kubernetes.io/serviceaccount/client/client-cert.pem 8 | key_file: /var/run/secrets/kubernetes.io/serviceaccount/client/client-key.pem 9 | insecure_skip_verify: false 10 | kubernetes_sd_configs: 11 | - role: pod 12 | relabel_configs: 13 | - source_labels: [__meta_kubernetes_pod_label_app] 14 | action: keep 15 | regex: "prometheus-reference-app" 16 | 17 | -------------------------------------------------------------------------------- /internal/referenceapp/python/app.py: -------------------------------------------------------------------------------- 1 | # Example code snippet from https://github.com/prometheus/client_python 2 | 3 | from prometheus_client import start_http_server, Summary 4 | import random 5 | import time 6 | 7 | # Create a metric to track time spent and requests made. 
8 | REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request') 9 | 10 | # Decorate function with metric. 11 | @REQUEST_TIME.time() 12 | def process_request(t): 13 | """A dummy function that takes some time.""" 14 | time.sleep(t) 15 | 16 | if __name__ == '__main__': 17 | # Start up the server to expose the metrics. 18 | start_http_server(2114) 19 | # Generate some requests. 20 | while True: 21 | process_request(random.random()) -------------------------------------------------------------------------------- /internal/referenceapp/python/linux/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM mcr.microsoft.com/azurelinux/base/python:3 4 | 5 | # Move to working directory /build 6 | WORKDIR /build 7 | 8 | RUN pip install prometheus-client 9 | 10 | # Copy the code into the container 11 | COPY . . 12 | 13 | # Export necessary port 14 | EXPOSE 2114 15 | 16 | CMD [ "python", "app.py", "--host=0.0.0.0"] 17 | -------------------------------------------------------------------------------- /internal/referenceapp/python/windows/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM mcr.microsoft.com/windows-cssc/python:3.11-servercore-ltsc2022 4 | 5 | # Move to working directory /build 6 | WORKDIR /build 7 | 8 | RUN python -m pip install prometheus-client 9 | 10 | # Copy the code into the container 11 | COPY . . 12 | 13 | RUN dir 14 | 15 | # Export necessary port 16 | EXPOSE 2114 17 | 18 | CMD [ "python", "app.py", "--host=0.0.0.0"] 19 | -------------------------------------------------------------------------------- /internal/referenceapp/testTemplates/azure-pvc-disk.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: mypod 5 | spec: 6 | containers: 7 | - name: mypod 8 | image: mcr.microsoft.com/oss/nginx/nginx:1.15.5-alpine 9 | resources: 10 | requests: 11 | cpu: 100m 12 | memory: 128Mi 13 | limits: 14 | cpu: 250m 15 | memory: 256Mi 16 | volumeMounts: 17 | - mountPath: "/mnt/azure" 18 | name: volume 19 | volumes: 20 | - name: volume 21 | persistentVolumeClaim: 22 | claimName: azure-managed-disk -------------------------------------------------------------------------------- /internal/referenceapp/testTemplates/azure-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: azure-managed-disk 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: managed-csi 9 | resources: 10 | requests: 11 | storage: 5Gi -------------------------------------------------------------------------------- /internal/referenceapp/windows-scrape-config.yaml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: win_prometheus_ref_app 3 | scrape_interval: 60s 4 | scheme: http 5 | metrics_path: /metrics 6 | static_configs: 7 | - targets: ['win-prometheus-reference-service.default.svc.cluster.local:2112','win-prometheus-reference-service.default.svc.cluster.local:2113','win-prometheus-reference-service.default.svc.cluster.local:2114'] 8 | -------------------------------------------------------------------------------- /internal/windowsExporterInstaller/README.md: -------------------------------------------------------------------------------- 1 | 2 | Deploy 
the `windows-exporter-daemonset.yaml` to your kubernetes cluster (version 1.21+) to get windows exporter running on your clusters nodes. -------------------------------------------------------------------------------- /mixins/coredns/.gitignore: -------------------------------------------------------------------------------- 1 | prometheus_alerts.yaml 2 | vendor 3 | jsonnetfile.lock.json 4 | dashboards_out 5 | -------------------------------------------------------------------------------- /mixins/coredns/alerts/add-runbook-links.libsonnet: -------------------------------------------------------------------------------- 1 | local utils = import '../lib/utils.libsonnet'; 2 | 3 | local lower(x) = 4 | local cp(c) = std.codepoint(c); 5 | local lowerLetter(c) = 6 | if cp(c) >= 65 && cp(c) < 91 7 | then std.char(cp(c) + 32) 8 | else c; 9 | std.join('', std.map(lowerLetter, std.stringChars(x))); 10 | 11 | { 12 | _config+:: { 13 | corednsRunbookURLPattern: 'https://github.com/povilasv/coredns-mixin/tree/master/runbook.md#alert-name-%s', 14 | }, 15 | 16 | prometheusAlerts+:: 17 | local addRunbookURL(rule, group) = rule { 18 | [if 'alert' in rule && std.member(['coredns', 'coredns_forward'], group.name) then 'annotations']+: { 19 | runbook_url: $._config.corednsRunbookURLPattern % lower(rule.alert), 20 | }, 21 | }; 22 | utils.mapRuleGroups(addRunbookURL), 23 | } 24 | -------------------------------------------------------------------------------- /mixins/coredns/alerts/alerts.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'coredns.libsonnet') + 2 | (import 'forward.libsonnet') + 3 | (import 'add-runbook-links.libsonnet') 4 | -------------------------------------------------------------------------------- /mixins/coredns/config.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | corednsSelector: 'job="kube-dns"', 4 | instanceLabel: 'pod', 5 | 6 | grafanaDashboardIDs: { 7 | 'coredns.json': 'ddcc77bf776f4f5f97660c85e1e96738', 8 | }, 9 | 10 | pluginNameLabel: 'name', 11 | kubernetesPlugin: false, 12 | grafana: { 13 | dashboardNamePrefix: '', 14 | dashboardTags: ['coredns-mixin'], 15 | 16 | // The default refresh time for all dashboards, default to 10s 17 | refresh: '1m', 18 | 19 | }, 20 | 21 | // Opt-in for multi-cluster support. 
22 | showMultiCluster: true, 23 | clusterLabel: 'cluster', 24 | }, 25 | } 26 | -------------------------------------------------------------------------------- /mixins/coredns/dashboards/dashboards.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'coredns.libsonnet') 2 | -------------------------------------------------------------------------------- /mixins/coredns/jsonnetfile.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/grafana/grafonnet-lib", 8 | "subdir": "grafonnet" 9 | } 10 | }, 11 | "version": "master" 12 | } 13 | ], 14 | "legacyImports": true 15 | } 16 | -------------------------------------------------------------------------------- /mixins/coredns/lib/alerts.jsonnet: -------------------------------------------------------------------------------- 1 | std.manifestYamlDoc((import '../mixin.libsonnet').prometheusAlerts) 2 | -------------------------------------------------------------------------------- /mixins/coredns/lib/dashboards.jsonnet: -------------------------------------------------------------------------------- 1 | local dashboards = (import '../mixin.libsonnet').grafanaDashboards; 2 | 3 | { 4 | [name]: dashboards[name] 5 | for name in std.objectFields(dashboards) 6 | } 7 | -------------------------------------------------------------------------------- /mixins/coredns/lib/utils.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | mapRuleGroups(f): { 3 | groups: [ 4 | group { 5 | rules: [ 6 | f(rule, group) 7 | for rule in super.rules 8 | ], 9 | } 10 | for group in super.groups 11 | ], 12 | }, 13 | } 14 | -------------------------------------------------------------------------------- /mixins/coredns/mixin.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'alerts/alerts.libsonnet') + 2 | (import 'dashboards/dashboards.libsonnet') + 3 | (import 'config.libsonnet') 4 | -------------------------------------------------------------------------------- /mixins/kubernetes/.gitignore: -------------------------------------------------------------------------------- 1 | prometheus_alerts.yaml 2 | prometheus_rules.yaml 3 | dashboards_out 4 | vendor 5 | jsonnetfile.lock.json 6 | tmp 7 | -------------------------------------------------------------------------------- /mixins/kubernetes/.lint: -------------------------------------------------------------------------------- 1 | exclusions: 2 | template-job-rule: 3 | template-instance-rule: 4 | target-job-rule: 5 | target-instance-rule: 6 | panel-title-description-rule: 7 | panel-units-rule: 8 | panel-datasource-rule: 9 | reason: The new Grafonnet promotes the use of datasources at the query level. This should probably end up in the linter as a valid option. 
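Both the coredns and kubernetes mixins dumped here follow the standard jsonnet mixin layout: `mixin.libsonnet` composes `alerts/`, `dashboards/` (and, for the kubernetes mixin, `rules/`) with `config.libsonnet`, and the thin wrappers in `lib/` (`alerts.jsonnet`, `dashboards.jsonnet`) manifest those objects. As a rough sketch of how the coredns mixin can be rendered locally (modeled on the `mixins/node/Makefile` later in this dump; assumes `jsonnet` and `jb` are on PATH, with output names mirroring the mixin's `.gitignore`):

```sh
# Sketch only, not an official build script.
cd mixins/coredns
jb install                                    # vendors grafonnet per jsonnetfile.json
jsonnet -J vendor -S lib/alerts.jsonnet > prometheus_alerts.yaml
mkdir -p dashboards_out
jsonnet -J vendor -m dashboards_out lib/dashboards.jsonnet
promtool check rules prometheus_alerts.yaml   # optional validation, if promtool is available
```

The generated `prometheus_alerts.yaml`, `vendor/`, and `dashboards_out/` are deliberately git-ignored, which is why only the libsonnet sources appear in the tree.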
10 | -------------------------------------------------------------------------------- /mixins/kubernetes/.vale.ini: -------------------------------------------------------------------------------- 1 | StylesPath = .vale/styles 2 | 3 | MinAlertLevel = error 4 | 5 | Packages = Readability, write-good, alex 6 | 7 | [*] 8 | BasedOnStyles = Readability, write-good, alex 9 | -------------------------------------------------------------------------------- /mixins/kubernetes/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md 2 | 3 | approvers: 4 | - brancz 5 | - csmarchbanks 6 | - metalmatze 7 | - tomwilkie 8 | - s-urbaniak 9 | - povilasv 10 | - paulfantom 11 | 12 | reviewers: 13 | - brancz 14 | - csmarchbanks 15 | - metalmatze 16 | - tomwilkie 17 | - s-urbaniak 18 | - povilasv 19 | - paulfantom 20 | -------------------------------------------------------------------------------- /mixins/kubernetes/SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | If you discover a security issue in this project, please report it to the project's [SECURITY_CONTACTS](SECURITY_CONTACTS). You can also ping the project's maintainers through the project's [Slack](https://kubernetes.slack.com/archives/CAX9GU941), privately. 6 | -------------------------------------------------------------------------------- /mixins/kubernetes/SECURITY_CONTACTS: -------------------------------------------------------------------------------- 1 | # Defined below are the security contacts for this repo. 2 | # 3 | # They are the contact point for the Product Security Committee to reach out 4 | # to for triaging and handling of incoming issues. 5 | # 6 | # The below names agree to abide by the 7 | # [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) 8 | # and will be removed and replaced if they violate that agreement. 
9 | # 10 | # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE 11 | # INSTRUCTIONS AT https://kubernetes.io/security/ 12 | 13 | brancz 14 | csmarchbanks 15 | metalmatze 16 | tomwilkie 17 | -------------------------------------------------------------------------------- /mixins/kubernetes/alerts/alerts.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'apps_alerts.libsonnet') + 2 | (import 'resource_alerts.libsonnet') + 3 | (import 'storage_alerts.libsonnet') + 4 | (import 'system_alerts.libsonnet') + 5 | (import 'kube_apiserver.libsonnet') + 6 | (import 'kubelet.libsonnet') + 7 | (import 'kube_scheduler.libsonnet') + 8 | (import 'kube_controller_manager.libsonnet') + 9 | (import 'kube_proxy.libsonnet') + 10 | (import '../lib/add-runbook-links.libsonnet') 11 | -------------------------------------------------------------------------------- /mixins/kubernetes/alerts/kube_controller_manager.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | kubeControllerManagerSelector: error 'must provide selector for kube-controller-manager', 4 | }, 5 | 6 | prometheusAlerts+:: { 7 | groups+: [ 8 | { 9 | name: 'kubernetes-system-controller-manager', 10 | rules: [ 11 | (import '../lib/absent_alert.libsonnet') { 12 | componentName:: 'KubeControllerManager', 13 | selector:: $._config.kubeControllerManagerSelector, 14 | }, 15 | ], 16 | }, 17 | ], 18 | }, 19 | } 20 | -------------------------------------------------------------------------------- /mixins/kubernetes/alerts/kube_proxy.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | kubeProxySelector: error 'must provide selector for kube-proxy', 4 | }, 5 | 6 | prometheusAlerts+:: { 7 | groups+: [ 8 | { 9 | name: 'kubernetes-system-kube-proxy', 10 | rules: [ 11 | (import '../lib/absent_alert.libsonnet') { 12 | componentName:: 'KubeProxy', 13 | selector:: $._config.kubeProxySelector, 14 | }, 15 | ], 16 | }, 17 | ], 18 | }, 19 | } 20 | -------------------------------------------------------------------------------- /mixins/kubernetes/alerts/kube_scheduler.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | kubeSchedulerSelector: 'job="kube-scheduler"', 4 | }, 5 | 6 | prometheusAlerts+:: { 7 | groups+: [ 8 | { 9 | name: 'kubernetes-system-scheduler', 10 | rules: [ 11 | (import '../lib/absent_alert.libsonnet') { 12 | componentName:: 'KubeScheduler', 13 | selector:: $._config.kubeSchedulerSelector, 14 | }, 15 | ], 16 | }, 17 | ], 18 | }, 19 | } 20 | -------------------------------------------------------------------------------- /mixins/kubernetes/dashboards/dashboards.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'network.libsonnet') + 2 | (import 'persistentvolumesusage.libsonnet') + 3 | (import 'resources.libsonnet') + 4 | (import 'apiserver.libsonnet') + 5 | (import 'controller-manager.libsonnet') + 6 | (import 'scheduler.libsonnet') + 7 | (import 'proxy.libsonnet') + 8 | (import 'kubelet.libsonnet') + 9 | (import 'defaults.libsonnet') 10 | -------------------------------------------------------------------------------- /mixins/kubernetes/dashboards/network.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'network-usage/cluster-total.libsonnet') + 2 | (import 
'network-usage/namespace-by-workload.libsonnet') + 3 | (import 'network-usage/namespace-by-pod.libsonnet') + 4 | (import 'network-usage/pod-total.libsonnet') + 5 | (import 'network-usage/workload-total.libsonnet') 6 | -------------------------------------------------------------------------------- /mixins/kubernetes/dashboards/resources.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'resources/cluster.libsonnet') + 2 | (import 'resources/multi-cluster.libsonnet') + 3 | (import 'resources/namespace.libsonnet') + 4 | (import 'resources/node.libsonnet') + 5 | (import 'resources/pod.libsonnet') + 6 | (import 'resources/workload-namespace.libsonnet') + 7 | (import 'resources/workload.libsonnet') 8 | -------------------------------------------------------------------------------- /mixins/kubernetes/jsonnetfile.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/grafana/grafonnet-lib.git", 8 | "subdir": "grafonnet" 9 | } 10 | }, 11 | "version": "master" 12 | }, 13 | { 14 | "source": { 15 | "git": { 16 | "remote": "https://github.com/grafana/grafonnet.git", 17 | "subdir": "gen/grafonnet-latest" 18 | } 19 | }, 20 | "version": "main" 21 | }, 22 | { 23 | "source": { 24 | "git": { 25 | "remote": "https://github.com/grafana/jsonnet-libs.git", 26 | "subdir": "grafana-builder" 27 | } 28 | }, 29 | "version": "02db06f540086fa3f67d487bd01e1b314853fb8f" 30 | } 31 | ], 32 | "legacyImports": false 33 | } 34 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/absent_alert.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | local absentAlert = self, 3 | componentName:: error 'must provide component name', 4 | selector:: error 'must provide selector for component', 5 | 6 | alert: '%sDown' % absentAlert.componentName, 7 | expr: ||| 8 | absent(up{%s} == 1) 9 | ||| % absentAlert.selector, 10 | 'for': '15m', 11 | labels: { 12 | severity: 'critical', 13 | }, 14 | annotations: { 15 | description: '%s has disappeared from Prometheus target discovery.' 
% absentAlert.componentName, 16 | summary: 'Target disappeared from Prometheus target discovery.', 17 | }, 18 | } 19 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/add-runbook-links.libsonnet: -------------------------------------------------------------------------------- 1 | local utils = import 'utils.libsonnet'; 2 | 3 | local lower(x) = 4 | local cp(c) = std.codepoint(c); 5 | local lowerLetter(c) = 6 | if cp(c) >= 65 && cp(c) < 91 7 | then std.char(cp(c) + 32) 8 | else c; 9 | std.join('', std.map(lowerLetter, std.stringChars(x))); 10 | 11 | { 12 | _config+:: { 13 | runbookURLPattern: 'https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-%s', 14 | }, 15 | 16 | prometheusAlerts+:: 17 | local addRunbookURL(rule) = rule { 18 | [if 'alert' in rule && !('runbook_url' in rule.annotations) then 'annotations']+: { 19 | runbook_url: $._config.runbookURLPattern % lower(rule.alert), 20 | }, 21 | }; 22 | utils.mapRuleGroups(addRunbookURL), 23 | } 24 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/alerts.jsonnet: -------------------------------------------------------------------------------- 1 | std.manifestYamlDoc((import '../mixin.libsonnet').prometheusAlerts) 2 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/dashboards.jsonnet: -------------------------------------------------------------------------------- 1 | local dashboards = (import '../mixin.libsonnet').grafanaDashboards; 2 | 3 | { 4 | [name]: dashboards[name] 5 | for name in std.objectFields(dashboards) 6 | } 7 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/promgrafonnet/numbersinglestat.libsonnet: -------------------------------------------------------------------------------- 1 | local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; 2 | local singlestat = grafana.singlestat; 3 | local prometheus = grafana.prometheus; 4 | 5 | { 6 | new(title, query):: 7 | singlestat.new( 8 | title, 9 | datasource='$datasource', 10 | span=3, 11 | valueName='current', 12 | valueMaps=[ 13 | { 14 | op: '=', 15 | text: '0', 16 | value: 'null', 17 | }, 18 | ], 19 | ) 20 | .addTarget( 21 | prometheus.target( 22 | query 23 | ) 24 | ) + { 25 | withTextNullValue(text):: self { 26 | valueMaps: [ 27 | { 28 | op: '=', 29 | text: text, 30 | value: 'null', 31 | }, 32 | ], 33 | }, 34 | withSpanSize(size):: self { 35 | span: size, 36 | }, 37 | withPostfix(postfix):: self { 38 | postfix: postfix, 39 | }, 40 | withSparkline():: self { 41 | sparkline: { 42 | show: true, 43 | lineColor: 'rgb(31, 120, 193)', 44 | fillColor: 'rgba(31, 118, 189, 0.18)', 45 | }, 46 | }, 47 | }, 48 | } 49 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/promgrafonnet/promgrafonnet.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | numbersinglestat:: import 'numbersinglestat.libsonnet', 3 | gauge:: import 'gauge.libsonnet', 4 | } 5 | -------------------------------------------------------------------------------- /mixins/kubernetes/lib/rules.jsonnet: -------------------------------------------------------------------------------- 1 | std.manifestYamlDoc((import '../mixin.libsonnet').prometheusRules) 2 | -------------------------------------------------------------------------------- 
/mixins/kubernetes/lib/utils.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | mapRuleGroups(f): { 3 | groups: [ 4 | group { 5 | rules: [ 6 | f(rule) 7 | for rule in super.rules 8 | ], 9 | } 10 | for group in super.groups 11 | ], 12 | }, 13 | 14 | humanizeSeconds(s):: 15 | if s > 60 * 60 * 24 16 | then '%.1f days' % (s / 60 / 60 / 24) 17 | else '%.1f hours' % (s / 60 / 60), 18 | } 19 | -------------------------------------------------------------------------------- /mixins/kubernetes/mixin.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'alerts/alerts.libsonnet') + 2 | (import 'dashboards/dashboards.libsonnet') + 3 | (import 'rules/rules.libsonnet') + 4 | (import 'dashboards/windows.libsonnet') + 5 | (import 'config.libsonnet') 6 | -------------------------------------------------------------------------------- /mixins/kubernetes/rules/kube_apiserver-config.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | kubeApiserverSelector: 'job="kube-apiserver"', 4 | podLabel: 'pod', 5 | kubeApiserverReadSelector: 'verb=~"LIST|GET"', 6 | kubeApiserverWriteSelector: 'verb=~"POST|PUT|PATCH|DELETE"', 7 | kubeApiserverNonStreamingSelector: 'subresource!~"proxy|attach|log|exec|portforward"', 8 | // These are buckets that exist on the apiserver_request_sli_duration_seconds_bucket histogram. 9 | // They are what the Kubernetes SIG Scalability is using to measure availability of Kubernetes clusters. 10 | // If you want to change these, make sure the "le" buckets exist on the histogram! 11 | kubeApiserverReadResourceLatency: '1', 12 | kubeApiserverReadNamespaceLatency: '5', 13 | kubeApiserverReadClusterLatency: '30', 14 | kubeApiserverWriteLatency: '1', 15 | }, 16 | } 17 | -------------------------------------------------------------------------------- /mixins/kubernetes/rules/kube_apiserver-histogram.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | prometheusRules+:: { 3 | local verbs = [ 4 | { type: 'read', selector: $._config.kubeApiserverReadSelector }, 5 | { type: 'write', selector: $._config.kubeApiserverWriteSelector }, 6 | ], 7 | 8 | groups+: [ 9 | { 10 | name: 'kube-apiserver-histogram.rules', 11 | rules: 12 | [ 13 | { 14 | record: 'cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile', 15 | expr: ||| 16 | histogram_quantile(0.99, sum by (%s, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{%s}[5m]))) > 0 17 | ||| % [$._config.clusterLabel, std.join(',', [$._config.kubeApiserverSelector, verb.selector, $._config.kubeApiserverNonStreamingSelector])], 18 | labels: { 19 | verb: verb.type, 20 | quantile: '0.99', 21 | }, 22 | } 23 | for verb in verbs 24 | ], 25 | }, 26 | ], 27 | }, 28 | } 29 | -------------------------------------------------------------------------------- /mixins/kubernetes/rules/kube_apiserver.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'kube_apiserver-config.libsonnet') + 2 | (import 'kube_apiserver-availability.libsonnet') + 3 | (import 'kube_apiserver-burnrate.libsonnet') + 4 | (import 'kube_apiserver-histogram.libsonnet') 5 | -------------------------------------------------------------------------------- /mixins/kubernetes/rules/kube_scheduler.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 
| kubeSchedulerSelector: 'job="kube-scheduler"', 4 | podLabel: 'pod', 5 | }, 6 | 7 | prometheusRules+:: { 8 | groups+: [ 9 | { 10 | name: 'kube-scheduler.rules', 11 | rules: [ 12 | { 13 | record: 'cluster_quantile:%s:histogram_quantile' % metric, 14 | expr: ||| 15 | histogram_quantile(%(quantile)s, sum(rate(%(metric)s_bucket{%(kubeSchedulerSelector)s}[5m])) without(instance, %(podLabel)s)) 16 | ||| % ({ quantile: quantile, metric: metric } + $._config), 17 | labels: { 18 | quantile: quantile, 19 | }, 20 | } 21 | for quantile in ['0.99', '0.9', '0.5'] 22 | for metric in [ 23 | 'scheduler_e2e_scheduling_duration_seconds', 24 | 'scheduler_scheduling_algorithm_duration_seconds', 25 | 'scheduler_binding_duration_seconds', 26 | ] 27 | ], 28 | }, 29 | ], 30 | }, 31 | } 32 | -------------------------------------------------------------------------------- /mixins/kubernetes/rules/kubelet.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | _config+:: { 3 | kubeletSelector: 'job="kubelet"', 4 | }, 5 | 6 | prometheusRules+:: { 7 | groups+: [ 8 | { 9 | name: 'kubelet.rules', 10 | rules: [ 11 | { 12 | record: 'node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile', 13 | expr: ||| 14 | histogram_quantile(%(quantile)s, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{%(kubeletSelector)s}[5m])) by (%(clusterLabel)s, instance, le) * on(%(clusterLabel)s, instance) group_left(node) kubelet_node_name{%(kubeletSelector)s}) 15 | ||| % ({ quantile: quantile } + $._config), 16 | labels: { 17 | quantile: quantile, 18 | }, 19 | } 20 | for quantile in ['0.99', '0.9', '0.5'] 21 | ], 22 | }, 23 | ], 24 | }, 25 | } 26 | -------------------------------------------------------------------------------- /mixins/kubernetes/rules/rules.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'kube_apiserver.libsonnet') + 2 | (import 'apps.libsonnet') + 3 | (import 'kube_scheduler.libsonnet') + 4 | (import 'node.libsonnet') + 5 | (import 'kubelet.libsonnet') 6 | -------------------------------------------------------------------------------- /mixins/kubernetes/scripts/tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | // +build tools 3 | 4 | // Package tools tracks dependencies for tools that are used in the build process.
5 | // See https://github.com/golang/go/issues/25922 6 | package tools 7 | 8 | import ( 9 | _ "github.com/Kunde21/markdownfmt/v3/cmd/markdownfmt" 10 | _ "github.com/cloudflare/pint/cmd/pint" 11 | _ "github.com/errata-ai/vale/v3/cmd/vale" 12 | _ "github.com/google/go-jsonnet/cmd/jsonnet" 13 | _ "github.com/google/go-jsonnet/cmd/jsonnet-lint" 14 | _ "github.com/google/go-jsonnet/cmd/jsonnetfmt" 15 | _ "github.com/grafana/dashboard-linter" 16 | _ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb" 17 | _ "github.com/prometheus/prometheus/cmd/promtool" 18 | ) 19 | -------------------------------------------------------------------------------- /mixins/node/.gitignore: -------------------------------------------------------------------------------- 1 | jsonnetfile.lock.json 2 | vendor 3 | *.yaml 4 | dashboards_out 5 | -------------------------------------------------------------------------------- /mixins/node/Makefile: -------------------------------------------------------------------------------- 1 | JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s 2 | 3 | all: fmt node_alerts.yaml node_rules.yaml dashboards_out lint 4 | 5 | fmt: 6 | find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ 7 | xargs -n 1 -- $(JSONNET_FMT) -i 8 | 9 | node_alerts.yaml: mixin.libsonnet config.libsonnet $(wildcard alerts/*) 10 | jsonnet -S alerts.jsonnet > $@ 11 | 12 | node_rules.yaml: mixin.libsonnet config.libsonnet $(wildcard rules/*) 13 | jsonnet -S rules.jsonnet > $@ 14 | 15 | dashboards_out: mixin.libsonnet config.libsonnet $(wildcard dashboards/*) 16 | @mkdir -p dashboards_out 17 | jsonnet -J vendor -m dashboards_out dashboards.jsonnet 18 | 19 | lint: node_alerts.yaml node_rules.yaml 20 | find . 
-name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ 21 | while read f; do \ 22 | $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ 23 | done 24 | 25 | # promtool check rules node_alerts.yaml node_rules.yaml 26 | 27 | .PHONY: jb_install 28 | jb_install: 29 | jb install 30 | 31 | clean: 32 | rm -rf dashboards_out node_alerts.yaml node_rules.yaml 33 | -------------------------------------------------------------------------------- /mixins/node/alerts.jsonnet: -------------------------------------------------------------------------------- 1 | std.manifestYamlDoc((import 'mixin.libsonnet').prometheusAlerts) 2 | -------------------------------------------------------------------------------- /mixins/node/dashboards.jsonnet: -------------------------------------------------------------------------------- 1 | local dashboards = (import 'mixin.libsonnet').grafanaDashboards; 2 | 3 | { 4 | [name]: dashboards[name] 5 | for name in std.objectFields(dashboards) 6 | } 7 | -------------------------------------------------------------------------------- /mixins/node/dashboards/dashboards.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'node.libsonnet') + 2 | (import 'use.libsonnet') 3 | -------------------------------------------------------------------------------- /mixins/node/dashboards/node.libsonnet: -------------------------------------------------------------------------------- 1 | { 2 | local nodemixin = import '../lib/prom-mixin.libsonnet', 3 | grafanaDashboards+:: { 4 | 'nodes.json': nodemixin.new(config=$._config, platform='Linux').dashboard, 5 | 'nodes-darwin.json': nodemixin.new(config=$._config, platform='Darwin').dashboard, 6 | }, 7 | } 8 | -------------------------------------------------------------------------------- /mixins/node/jsonnetfile.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/grafana/grafonnet-lib.git", 8 | "subdir": "grafonnet" 9 | } 10 | }, 11 | "version": "master" 12 | }, 13 | { 14 | "source": { 15 | "git": { 16 | "remote": "https://github.com/grafana/grafonnet-lib.git", 17 | "subdir": "grafonnet-7.0" 18 | } 19 | }, 20 | "version": "master" 21 | } 22 | ], 23 | "legacyImports": false 24 | } 25 | -------------------------------------------------------------------------------- /mixins/node/mixin.libsonnet: -------------------------------------------------------------------------------- 1 | (import 'config.libsonnet') + 2 | (import 'alerts/alerts.libsonnet') + 3 | (import 'dashboards/dashboards.libsonnet') + 4 | (import 'rules/rules.libsonnet') 5 | -------------------------------------------------------------------------------- /mixins/node/rules.jsonnet: -------------------------------------------------------------------------------- 1 | std.manifestYamlDoc((import 'mixin.libsonnet').prometheusRules) 2 | -------------------------------------------------------------------------------- /otelcollector/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /otelcollector/VERSION: -------------------------------------------------------------------------------- 1 | 6.17.0 2 | -------------------------------------------------------------------------------- /otelcollector/build/linux/.dockerignore: -------------------------------------------------------------------------------- 1 | shared/process_utilities_windows.go 2 | -------------------------------------------------------------------------------- /otelcollector/build/linux/rpm-repos/mariner-official-extras.repo: -------------------------------------------------------------------------------- 1 | [mariner-official-extras] 2 | name=CBL-Mariner Official Extras 3 | baseurl=https://packages.microsoft.com/cbl-mariner/2.0/prod/extras/x86_64/ 4 | gpgkey=file:///etc/pki/rpm-gpg/MICROSOFT-RPM-GPG-KEY file:///etc/pki/rpm-gpg/MICROSOFT-METADATA-GPG-KEY 5 | gpgcheck=1 6 | repo_gpgcheck=1 7 | enabled=1 8 | skip_if_unavailable=True 9 | sslverify=1 10 | -------------------------------------------------------------------------------- /otelcollector/build/windows/.dockerignore: -------------------------------------------------------------------------------- 1 | shared/process_utilities_linux.go 2 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/acstorCapacityProvisionerDefaultFile.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: acstor-capacity-provisioner 3 | honor_labels: true 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | scheme: http 6 | kubernetes_sd_configs: 7 | - role: pod 8 | namespaces: 9 | names: 10 | - acstor 11 | relabel_configs: 12 | # Include only specified namespace 13 | - source_labels: [__meta_kubernetes_namespace] 14 | action: keep 15 | regex: acstor 16 | # Include only specified pods 17 | - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name, __meta_kubernetes_pod_label_app_kubernetes_io_component] 18 | action: keep 19 | regex: capacity-provisioner;capacity-provisoner 20 | # Include only specified ports 21 | - source_labels: [__meta_kubernetes_pod_container_port_name] 22 | action: keep 23 | regex: metrics 24 | -------------------------------------------------------------------------------- 
/otelcollector/configmapparser/default-prom-configs/acstorMetricsExporterDefaultFile.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: acstor-metrics-exporter 3 | honor_labels: true 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | scheme: http 6 | kubernetes_sd_configs: 7 | - role: pod 8 | namespaces: 9 | names: 10 | - acstor 11 | relabel_configs: 12 | # Include only specified namespace 13 | - source_labels: [__meta_kubernetes_namespace] 14 | action: keep 15 | regex: acstor 16 | # Include only specified pods 17 | - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name, __meta_kubernetes_pod_label_app_kubernetes_io_component] 18 | action: keep 19 | regex: metrics-exporter;monitor 20 | # Include only specified ports 21 | - source_labels: [__meta_kubernetes_pod_container_port_name] 22 | action: keep 23 | regex: metrics 24 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/apiserverDefault.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kube-apiserver 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | label_limit: 63 5 | label_name_length_limit: 511 6 | label_value_length_limit: 1023 7 | kubernetes_sd_configs: 8 | - role: endpoints 9 | namespaces: 10 | names: 11 | - default 12 | scheme: https 13 | tls_config: 14 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 15 | insecure_skip_verify: true 16 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 17 | relabel_configs: 18 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] 19 | action: keep 20 | regex: default;kubernetes;https 21 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/cadvisorDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: cadvisor 3 | scheme: https 4 | metrics_path: /metrics/cadvisor 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__address__] 15 | replacement: '$$NODE_NAME$$' 16 | target_label: instance 17 | static_configs: 18 | - targets: ['$$NODE_IP$$:10250'] 19 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/cadvisorDefaultRsAdvanced.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: cadvisor 3 | scheme: https 4 | metrics_path: /metrics/cadvisor 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 15 | action: keep 16 | regex: "linux" 17 | metric_relabel_configs: 18 | - source_labels: [__name__] 19 | action: keep 20 
| regex: "up" 21 | kubernetes_sd_configs: 22 | - role: node 23 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/cadvisorDefaultRsSimple.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: cadvisor 3 | scheme: https 4 | metrics_path: /metrics/cadvisor 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 15 | action: keep 16 | regex: "linux" 17 | kubernetes_sd_configs: 18 | - role: node 19 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/corednsDefault.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kube-dns 3 | scheme: http 4 | metrics_path: /metrics 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | relabel_configs: 10 | - action: keep 11 | source_labels: [__meta_kubernetes_namespace,__meta_kubernetes_pod_name] 12 | separator: '/' 13 | regex: 'kube-system/coredns.+' 14 | - source_labels: [__meta_kubernetes_pod_container_port_name] 15 | action: keep 16 | regex: metrics 17 | - source_labels: [__meta_kubernetes_pod_name] 18 | target_label: pod 19 | kubernetes_sd_configs: 20 | - role: pod 21 | namespaces: 22 | names: 23 | - kube-system 24 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kappieBasicDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: "kappie-basic" 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | label_limit: 63 5 | label_name_length_limit: 511 6 | label_value_length_limit: 1023 7 | kubernetes_sd_configs: 8 | - role: service 9 | scheme: http 10 | relabel_configs: 11 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name] 12 | action: keep 13 | regex: kube-system;kappie-svc 14 | - source_labels: [__address__] 15 | target_label: __address__ 16 | replacement: '$$NODE_IP$$:10093' 17 | action: replace 18 | - source_labels: [__address__] 19 | replacement: '$$NODE_NAME$$' 20 | target_label: instance 21 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kubeletDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kubelet 3 | scheme: https 4 | metrics_path: /metrics 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__metrics_path__] 15 | regex: (.*) 16 | target_label: metrics_path 17 | - source_labels: [__address__] 18 | replacement: '$$NODE_NAME$$' 19 | target_label: instance 20 | - 
source_labels: [__address__] 21 | replacement: '$$OS_TYPE$$' 22 | target_label: "kubernetes_io_os" 23 | static_configs: 24 | - targets: ['$$NODE_IP$$:10250'] -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kubeletDefaultRsAdvanced.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kubelet 3 | scheme: https 4 | metrics_path: /metrics 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__metrics_path__] 15 | regex: (.*) 16 | target_label: metrics_path 17 | - source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 18 | target_label: "kubernetes_io_os" 19 | metric_relabel_configs: 20 | - source_labels: [kubernetes_io_os] 21 | regex: "windows" 22 | action: keep 23 | kubernetes_sd_configs: 24 | - role: node 25 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kubeletDefaultRsAdvancedWindowsDaemonset.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kubelet 3 | scheme: https 4 | metrics_path: /metrics 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__metrics_path__] 15 | regex: (.*) 16 | target_label: metrics_path 17 | - source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 18 | target_label: "kubernetes_io_os" 19 | metric_relabel_configs: 20 | - source_labels: [__name__] 21 | action: keep 22 | regex: "up" 23 | kubernetes_sd_configs: 24 | - role: node 25 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kubeletDefaultRsSimple.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kubelet 3 | scheme: https 4 | metrics_path: /metrics 5 | scrape_interval: $$SCRAPE_INTERVAL$$ 6 | label_limit: 63 7 | label_name_length_limit: 511 8 | label_value_length_limit: 1023 9 | tls_config: 10 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 11 | insecure_skip_verify: true 12 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | relabel_configs: 14 | - source_labels: [__metrics_path__] 15 | regex: (.*) 16 | target_label: metrics_path 17 | - source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 18 | target_label: "kubernetes_io_os" 19 | kubernetes_sd_configs: 20 | - role: node 21 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kubeproxyDefault.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kube-proxy 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | label_limit: 63 5 | label_name_length_limit: 511 6 | 
label_value_length_limit: 1023 7 | kubernetes_sd_configs: 8 | - role: pod 9 | namespaces: 10 | names: 11 | - kube-system 12 | relabel_configs: 13 | - action: keep 14 | source_labels: [__meta_kubernetes_namespace,__meta_kubernetes_pod_name] 15 | separator: '/' 16 | regex: 'kube-system/kube-proxy.+' 17 | - source_labels: 18 | - __address__ 19 | action: replace 20 | target_label: __address__ 21 | regex: (.+?)(\:\d+)? 22 | replacement: $$1:10249 23 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/kubestateDefault.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kube-state-metrics 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | label_limit: 63 5 | label_name_length_limit: 511 6 | label_value_length_limit: 1023 7 | static_configs: 8 | - targets: ['$$KUBE_STATE_NAME$$.$$POD_NAMESPACE$$.svc.cluster.local:8080'] 9 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/networkobservabilityCiliumDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: "networkobservability-cilium" 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | kubernetes_sd_configs: 5 | - role: service 6 | scheme: http 7 | relabel_configs: 8 | - source_labels: 9 | [ 10 | __meta_kubernetes_namespace, 11 | __meta_kubernetes_service_name, 12 | __meta_kubernetes_service_port_name, 13 | ] 14 | action: keep 15 | regex: kube-system;network-observability;cilium 16 | 17 | - source_labels: [__address__] 18 | target_label: __address__ 19 | replacement: $$NODE_IP$$ 20 | action: replace 21 | 22 | - source_labels: [__address__, __meta_kubernetes_service_port_number] 23 | action: replace 24 | regex: ([^:]+)(?::\d+)?;(\d+) 25 | replacement: $1:$2 26 | target_label: __address__ 27 | 28 | - source_labels: [__address__] 29 | replacement: "$$NODE_NAME$$" 30 | target_label: instance 31 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/networkobservabilityHubbleDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: "networkobservability-hubble" 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | kubernetes_sd_configs: 5 | - role: service 6 | scheme: http 7 | relabel_configs: 8 | - source_labels: 9 | [ 10 | __meta_kubernetes_namespace, 11 | __meta_kubernetes_service_name, 12 | __meta_kubernetes_service_port_name, 13 | ] 14 | action: keep 15 | regex: kube-system;network-observability;hubble 16 | 17 | - source_labels: [__address__] 18 | target_label: __address__ 19 | replacement: $$NODE_IP$$ 20 | action: replace 21 | 22 | - source_labels: [__address__, __meta_kubernetes_service_port_number] 23 | action: replace 24 | regex: ([^:]+)(?::\d+)?;(\d+) 25 | replacement: $1:$2 26 | target_label: __address__ 27 | 28 | - source_labels: [__address__] 29 | replacement: "$$NODE_NAME$$" 30 | target_label: instance 31 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/networkobservabilityRetinaDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: "networkobservability-retina" 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | kubernetes_sd_configs: 5 | - role: 
service 6 | scheme: http 7 | relabel_configs: 8 | - source_labels: 9 | [ 10 | __meta_kubernetes_namespace, 11 | __meta_kubernetes_service_name, 12 | __meta_kubernetes_service_port_name, 13 | ] 14 | action: keep 15 | regex: kube-system;network-observability;retina 16 | 17 | - source_labels: [__address__] 18 | target_label: __address__ 19 | replacement: $$NODE_IP$$ 20 | action: replace 21 | 22 | - source_labels: [__address__, __meta_kubernetes_service_port_number] 23 | action: replace 24 | regex: ([^:]+)(?::\d+)?;(\d+) 25 | replacement: $1:$2 26 | target_label: __address__ 27 | 28 | - source_labels: [__address__] 29 | replacement: "$$NODE_NAME$$" 30 | target_label: instance 31 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/nodeexporterDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: node 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | relabel_configs: 9 | - source_labels: [__metrics_path__] 10 | regex: (.*) 11 | target_label: metrics_path 12 | - source_labels: [__address__] 13 | replacement: '$$NODE_NAME$$' 14 | target_label: instance 15 | static_configs: 16 | - targets: ['$$NODE_IP$$:$$NODE_EXPORTER_TARGETPORT$$'] 17 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/nodeexporterDefaultRsAdvanced.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: node 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | kubernetes_sd_configs: 9 | - role: endpoints 10 | namespaces: 11 | names: 12 | - $$POD_NAMESPACE$$ 13 | relabel_configs: 14 | - action: keep 15 | source_labels: [__meta_kubernetes_endpoints_name] 16 | regex: $$NODE_EXPORTER_NAME$$ 17 | - source_labels: [__metrics_path__] 18 | regex: (.*) 19 | target_label: metrics_path 20 | - source_labels: [__meta_kubernetes_endpoint_node_name] 21 | regex: (.*) 22 | target_label: instance 23 | metric_relabel_configs: 24 | - source_labels: [__name__] 25 | action: keep 26 | regex: "up" 27 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/nodeexporterDefaultRsSimple.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: node 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | kubernetes_sd_configs: 9 | - role: endpoints 10 | namespaces: 11 | names: 12 | - $$POD_NAMESPACE$$ 13 | relabel_configs: 14 | - action: keep 15 | source_labels: [__meta_kubernetes_endpoints_name] 16 | regex: $$NODE_EXPORTER_NAME$$ 17 | - source_labels: [__metrics_path__] 18 | regex: (.*) 19 | target_label: metrics_path 20 | - source_labels: [__meta_kubernetes_endpoint_node_name] 21 | regex: (.*) 22 | target_label: instance 23 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/podannotationsDefault.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: 'kubernetes-pods' 3 | 
scrape_interval: $$SCRAPE_INTERVAL$$ 4 | label_limit: 63 5 | label_name_length_limit: 511 6 | label_value_length_limit: 1023 7 | kubernetes_sd_configs: 8 | - role: pod 9 | relabel_configs: 10 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] 11 | action: keep 12 | regex: true 13 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] 14 | action: replace 15 | target_label: __metrics_path__ 16 | regex: (.+) 17 | - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] 18 | action: replace 19 | regex: ([^:]+)(?::\d+)?;(\d+) 20 | replacement: $1:$2 21 | target_label: __address__ 22 | - action: labelmap 23 | regex: __meta_kubernetes_pod_label_(.+) 24 | - source_labels: [__meta_kubernetes_namespace] 25 | action: replace 26 | target_label: kubernetes_namespace 27 | - source_labels: [__meta_kubernetes_pod_name] 28 | action: replace 29 | target_label: kubernetes_pod_name 30 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/prometheusCollectorHealth.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: prometheus_collector_health 3 | scrape_interval: $$SCRAPE_INTERVAL$$ 4 | label_limit: 63 5 | label_name_length_limit: 511 6 | label_value_length_limit: 1023 7 | static_configs: 8 | - targets: ['127.0.0.1:2234'] 9 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/windowsexporterDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: windows-exporter 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | relabel_configs: 9 | - source_labels: [__address__] 10 | replacement: '$$NODE_NAME$$' 11 | target_label: instance 12 | static_configs: 13 | - targets: ['$$NODE_IP$$:9182'] -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/windowsexporterDefaultRsSimple.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: windows-exporter 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | tls_config: 9 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 10 | insecure_skip_verify: true 11 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | kubernetes_sd_configs: 13 | - role: node 14 | relabel_configs: 15 | - source_labels: [__meta_kubernetes_node_name] 16 | target_label: instance 17 | - action: keep 18 | source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 19 | regex: windows 20 | - source_labels: 21 | - __address__ 22 | action: replace 23 | target_label: __address__ 24 | regex: (.+?)(\:\d+)? 
25 | replacement: $$1:9182 -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/windowskubeproxyDefaultDs.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kube-proxy-windows 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | relabel_configs: 9 | - source_labels: [__address__] 10 | replacement: '$$NODE_NAME$$' 11 | target_label: instance 12 | static_configs: 13 | - targets: ['$$NODE_IP$$:10249'] 14 | -------------------------------------------------------------------------------- /otelcollector/configmapparser/default-prom-configs/windowskubeproxyDefaultRsSimple.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: kube-proxy-windows 3 | scheme: http 4 | scrape_interval: $$SCRAPE_INTERVAL$$ 5 | label_limit: 63 6 | label_name_length_limit: 511 7 | label_value_length_limit: 1023 8 | tls_config: 9 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 10 | insecure_skip_verify: true 11 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | kubernetes_sd_configs: 13 | - role: node 14 | relabel_configs: 15 | - action: keep 16 | source_labels: [__meta_kubernetes_node_label_kubernetes_io_os] 17 | regex: windows 18 | - source_labels: [__meta_kubernetes_node_name] 19 | target_label: instance 20 | - source_labels: 21 | - __address__ 22 | action: replace 23 | target_label: __address__ 24 | regex: (.+?)(\:\d+)? 25 | replacement: $$1:10249 26 | -------------------------------------------------------------------------------- /otelcollector/configmaps/ama-metrics-prometheus-config-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | data: 4 | prometheus-config: |- 5 | global: 6 | scrape_interval: 15s 7 | scrape_configs: 8 | - job_name: 9 | - job_name: 10 | metadata: 11 | name: ama-metrics-prometheus-config 12 | namespace: kube-system -------------------------------------------------------------------------------- /otelcollector/configmaps/ama-metrics-prometheus-config-node-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | data: 4 | prometheus-config: |- 5 | global: 6 | scrape_interval: 15s 7 | scrape_configs: 8 | - job_name: 9 | - job_name: 10 | metadata: 11 | name: ama-metrics-prometheus-config-node 12 | namespace: kube-system -------------------------------------------------------------------------------- /otelcollector/configuration-reader-builder/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: configurationreader 2 | configurationreader: 3 | @echo "========================= Building configurationreader =========================" 4 | @echo "========================= cleanup existing configurationreader =========================" 5 | rm -rf configurationreader 6 | @echo "========================= go get =========================" 7 | go get 8 | @echo "========================= go build =========================" 9 | go build -buildmode=pie -ldflags '-linkmode external -extldflags=-Wl,-z,now' -o configurationreader . 
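Both prometheus-config configmap templates above ship with blank `job_name:` placeholders for users to fill in. As an illustrative sketch only (the job name and target below are hypothetical and not part of the repository), a filled-in `ama-metrics-prometheus-config` could look like this:

kind: ConfigMap
apiVersion: v1
data:
  prometheus-config: |-
    global:
      scrape_interval: 15s
    scrape_configs:
      # Hypothetical custom job; substitute your own job name and target.
      - job_name: my-static-app
        static_configs:
          - targets: ['my-app.default.svc.cluster.local:8080']
metadata:
  name: ama-metrics-prometheus-config
  namespace: kube-system

The node-level variant (`ama-metrics-prometheus-config-node`) takes the same shape; only the configmap name differs.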
-------------------------------------------------------------------------------- /otelcollector/customresources/pod-monitor-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: azmonitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: 5 | spec: 6 | # The following limits - labelLimit, labelNameLengthLimit and labelValueLengthLimit should exist in the pod monitor CR 7 | # These ensure that the metrics don't get dropped because labels/labelnames/labelvalues exceed the limits supported by the processing pipeline 8 | labelLimit: 63 9 | labelNameLengthLimit: 511 10 | labelValueLengthLimit: 1023 11 | # rest of the pod monitor 12 | 13 | -------------------------------------------------------------------------------- /otelcollector/customresources/service-monitor-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: azmonitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: 5 | spec: 6 | # The following limits - labelLimit, labelNameLengthLimit and labelValueLengthLimit should exist in the service monitor CR 7 | # These ensure that the metrics don't get dropped because labels/labelnames/labelvalues exceed the limits supported by the processing pipeline 8 | labelLimit: 63 9 | labelNameLengthLimit: 511 10 | labelValueLengthLimit: 1023 11 | # rest of the service monitor 12 | 13 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: ama-metrics-clusterrolebinding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: ama-metrics-serviceaccount 8 | namespace: kube-system 9 | roleRef: 10 | kind: ClusterRole 11 | name: ama-metrics-reader 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-collector-hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- $arcExtensionSettings := include "arc-extension-settings" . | fromYaml }} 2 | {{- if $arcExtensionSettings.hpaEnabled }} 3 | {{- $amaMetricsHpa := include "ama-metrics-merge-custom-hpa" . 
| fromYaml }} 4 | apiVersion: autoscaling/v2 5 | kind: HorizontalPodAutoscaler 6 | metadata: 7 | name: ama-metrics-hpa 8 | namespace: kube-system 9 | labels: 10 | component: ama-metrics-hpa 11 | kubernetes.azure.com/managedby: aks 12 | spec: 13 | scaleTargetRef: 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | name: ama-metrics 17 | minReplicas: {{ $amaMetricsHpa.amaMetricsMinReplicasFromHelper }} 18 | maxReplicas: {{ $amaMetricsHpa.amaMetricsMaxReplicasFromHelper }} 19 | metrics: 20 | - type: ContainerResource 21 | containerResource: 22 | name: memory 23 | container: prometheus-collector 24 | target: 25 | averageValue: 5Gi 26 | type: AverageValue 27 | behavior: 28 | scaleDown: 29 | stabilizationWindowSeconds: 300 30 | policies: 31 | - type: Pods 32 | value: 1 33 | periodSeconds: 300 34 | {{- end }} -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-ksm-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: ama-metrics 6 | app.kubernetes.io/name: ama-metrics-ksm 7 | app.kubernetes.io/part-of: ama-metrics-ksm 8 | app.kubernetes.io/version: 2.12.0 9 | helm.sh/chart: azure-monitor-metrics-addon-0.1.0 10 | name: ama-metrics-ksm 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: ama-metrics-ksm 15 | subjects: 16 | - kind: ServiceAccount 17 | name: ama-metrics-ksm 18 | namespace: kube-system 19 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-ksm-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ama-metrics-ksm 5 | namespace: kube-system 6 | labels: 7 | app.kubernetes.io/component: ama-metrics 8 | app.kubernetes.io/name: ama-metrics-ksm 9 | app.kubernetes.io/part-of: ama-metrics-ksm 10 | app.kubernetes.io/version: 2.12.0 11 | helm.sh/chart: azure-monitor-metrics-addon-0.1.0 12 | annotations: 13 | prometheus.io/scrape: 'true' 14 | spec: 15 | type: "ClusterIP" 16 | ports: 17 | - name: "http" 18 | protocol: TCP 19 | port: 8080 20 | targetPort: 8080 21 | selector: 22 | app.kubernetes.io/name: ama-metrics-ksm 23 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-ksm-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: ama-metrics 6 | app.kubernetes.io/name: ama-metrics-ksm 7 | app.kubernetes.io/part-of: ama-metrics-ksm 8 | app.kubernetes.io/version: 2.12.0 9 | helm.sh/chart: azure-monitor-metrics-addon-0.1.0 10 | name: ama-metrics-ksm 11 | namespace: kube-system 12 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: ama-metrics-serviceaccount 5 | namespace: kube-system 
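The `ama-metrics-ksm` Service above is annotated with `prometheus.io/scrape: 'true'`, the same convention the `kubernetes-pods` default job (podannotationsDefault.yml earlier in this listing) applies at the pod level: that job keeps only pods carrying the annotation and rewrites the scrape path and port from `prometheus.io/path` and `prometheus.io/port`. Below is a minimal sketch of a pod that job would pick up; the pod name, image, and port are hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: sample-annotated-app            # hypothetical name
  annotations:
    prometheus.io/scrape: "true"        # required by the keep relabel rule
    prometheus.io/path: "/metrics"      # optional; the path defaults to /metrics
    prometheus.io/port: "8080"          # substituted into __address__
spec:
  containers:
    - name: app
      image: example.azurecr.io/sample-app:latest   # hypothetical image
      ports:
        - containerPort: 8080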
-------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/azure-monitor-metrics-addon/templates/ama-metrics-targetallocator-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $arcExtensionSettings := include "arc-extension-settings" . | fromYaml }} 2 | {{- if $arcExtensionSettings.operatorEnabled }} 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | labels: 7 | component: ama-metrics-operator-targets 8 | kubernetes.azure.com/managedby: aks 9 | name: ama-metrics-operator-targets 10 | namespace: kube-system 11 | spec: 12 | internalTrafficPolicy: Cluster 13 | ipFamilies: 14 | - IPv4 15 | ipFamilyPolicy: SingleStack 16 | ports: 17 | - name: targetallocation 18 | port: 80 19 | protocol: TCP 20 | targetPort: 8080 21 | selector: 22 | rsName: ama-metrics-operator-targets 23 | sessionAffinity: None 24 | type: ClusterIP 25 | status: 26 | loadBalancer: {} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/ccp-metrics-plugin/templates/ama-metrics-role.yaml: -------------------------------------------------------------------------------- 1 | # Source: ama-metrics-cpp/templates/ama-metrics-role.yaml 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: ama-metrics-ccp-role 6 | namespace: {{ .Values.global.commonGlobals.Customer.Namespace }} 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | [ 11 | "pods", 12 | "endpoints", 13 | "services", 14 | ] 15 | verbs: ["list", "get", "watch"] 16 | --- 17 | # Source: ama-metrics-cpp/templates/ama-metrics-role.yaml 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: Role 20 | metadata: 21 | name: ama-metrics-ccp-role 22 | namespace: kube-system 23 | rules: 24 | - apiGroups: [""] 25 | resources: ["secrets"] 26 | resourceNames: ["aad-msi-auth-token"] 27 | verbs: ["get", "watch"] -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/ccp-metrics-plugin/templates/ama-metrics-roleBinding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: ama-metrics-ccp-rolebinding 5 | namespace: kube-system 6 | subjects: 7 | - kind: ServiceAccount 8 | name: ama-metrics-ccp-sa 9 | namespace: {{ .Values.global.commonGlobals.Customer.Namespace }} 10 | roleRef: 11 | kind: Role 12 | name: ama-metrics-ccp-role 13 | apiGroup: rbac.authorization.k8s.io 14 | --- 15 | kind: RoleBinding 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: ama-metrics-ccp-rolebinding 19 | namespace: {{ .Values.global.commonGlobals.Customer.Namespace }} 20 | subjects: 21 | - kind: ServiceAccount 22 | name: ama-metrics-ccp-sa 23 | namespace: {{ .Values.global.commonGlobals.Customer.Namespace }} 24 | roleRef: 25 | kind: Role 26 | name: ama-metrics-ccp-role 27 | apiGroup: rbac.authorization.k8s.io 28 | -------------------------------------------------------------------------------- /otelcollector/deploy/addon-chart/ccp-metrics-plugin/templates/ama-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: ama-metrics-ccp-sa 5 | namespace: {{ .Values.global.commonGlobals.Customer.Namespace }} 
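The ccp-metrics-plugin templates above parameterize the namespace via `.Values.global.commonGlobals.Customer.Namespace` and define the same `ama-metrics-ccp-role` name in two namespaces, so the `ama-metrics-ccp-sa` service account in the customer namespace can also read the `aad-msi-auth-token` secret in kube-system. Assuming, purely for illustration, that the customer namespace value renders to `customer-ns` (a hypothetical value), the kube-system RoleBinding would come out roughly as:

kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ama-metrics-ccp-rolebinding
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: ama-metrics-ccp-sa
    namespace: customer-ns        # hypothetical rendered value
roleRef:
  kind: Role
  name: ama-metrics-ccp-role
  apiGroup: rbac.authorization.k8s.io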
-------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing {{ .Chart.Name }}. 2 | 3 | Your release is named {{ .Release.Name }}. 4 | You deployed into {{ .Release.Namespace }} namespace 5 | 6 | To learn more about the release, try: 7 | 8 | $ helm status {{ .Release.Name }} --namespace={{ .Release.Namespace }} 9 | $ helm get all {{ .Release.Name }} --namespace={{ .Release.Namespace }} 10 | -------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/templates/prometheus-collector-azure-keyVault-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (not .Values.azureKeyVault.useManagedIdentity) (not .Values.useMonitoringAccount) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "prometheus-collector.fullname" . }}-akv-creds 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "prometheus-collector.labels" . | nindent 4 }} 9 | secrets-store.csi.k8s.io/used: "true" 10 | type: Opaque 11 | data: 12 | clientid: {{ required "azureKeyVault.clientId is required" .Values.azureKeyVault.clientId | toString | b64enc | quote }} 13 | clientsecret: {{ required "azureKeyVault.clientSecret is required" .Values.azureKeyVault.clientSecret | toString | b64enc | quote }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/templates/prometheus-collector-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: {{ template "prometheus-collector.fullname" . }}-reader 5 | labels: 6 | {{- include "prometheus-collector.labels" . 
| nindent 4 }} 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | [ 11 | "pods", 12 | "nodes", 13 | "nodes/stats", 14 | "nodes/metrics", 15 | "nodes/proxy", 16 | "namespaces", 17 | "services", 18 | "endpoints", 19 | "ingress" 20 | ] 21 | verbs: ["list", "get", "watch"] 22 | - apiGroups: 23 | - networking.k8s.io 24 | resources: 25 | - ingresses 26 | verbs: ["list", "get", "watch"] 27 | - apiGroups: [""] 28 | resources: ["secrets"] 29 | resourceNames: ["aad-msi-auth-token"] 30 | verbs: ["get", "watch"] 31 | - nonResourceURLs: ["/metrics"] 32 | verbs: ["get"] 33 | -------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/templates/prometheus-collector-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: {{ template "prometheus-collector.fullname" . }}-clusterrolebinding 5 | subjects: 6 | - kind: ServiceAccount 7 | name: {{ template "prometheus-collector.fullname" . }}-serviceaccount 8 | namespace: {{ .Release.Namespace }} 9 | roleRef: 10 | kind: ClusterRole 11 | name: {{ template "prometheus-collector.fullname" . }}-reader 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/templates/prometheus-collector-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheusConfig }} 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: {{ .Release.Name }}-prometheus-config 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "prometheus-collector.labels" . | nindent 4 }} 9 | data: 10 | prometheus-config: |- 11 | {{ .Values.prometheusConfig | nindent 4 }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /otelcollector/deploy/chart/prometheus-collector/templates/prometheus-collector-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ template "prometheus-collector.fullname" . }}-serviceaccount 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "prometheus-collector.labels" . | nindent 4 }} -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kube-state-metrics 3 | description: Install kube-state-metrics to generate and expose cluster-level metrics 4 | keywords: 5 | - metric 6 | - monitoring 7 | - prometheus 8 | - kubernetes 9 | type: application 10 | version: 5.10.1 11 | appVersion: 2.9.2 12 | home: https://github.com/kubernetes/kube-state-metrics/ 13 | sources: 14 | - https://github.com/kubernetes/kube-state-metrics/ 15 | maintainers: 16 | - name: tariq1890 17 | email: tariq.ibrahim@mulesoft.com 18 | - name: mrueg 19 | email: manuel@rueg.eu 20 | - name: dotdc 21 | email: david@0xdc.me 22 | annotations: 23 | "artifacthub.io/license": Apache-2.0 24 | "artifacthub.io/links": | 25 | - name: Chart Source 26 | url: https://github.com/prometheus-community/helm-charts 27 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. 2 | The exposed metrics can be found here: 3 | https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics 4 | 5 | The metrics are exported on the HTTP endpoint /metrics on the listening port. 6 | In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics 7 | 8 | They are served either as plaintext or protobuf depending on the Accept header. 9 | They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. 10 | 11 | {{- if .Values.kubeRBACProxy.enabled}} 12 | 13 | kube-rbac-proxy endpoint protections is enabled: 14 | - Metrics endpoints are now HTTPS 15 | - Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions: 16 | ``` 17 | rules: 18 | - apiGroups: [ "" ] 19 | resources: ["services/{{ template "kube-state-metrics.fullname" . }}"] 20 | verbs: 21 | - get 22 | ``` 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: {{ template "kube-state-metrics.fullname" . }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | {{- if .Values.rbac.useExistingRole }} 12 | name: {{ .Values.rbac.useExistingRole }} 13 | {{- else }} 14 | name: {{ template "kube-state-metrics.fullname" . 
}} 15 | {{- end }} 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 19 | namespace: {{ template "kube-state-metrics.namespace" . }} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/crs-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.customResourceState.enabled}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }}-customresourcestate-config 6 | data: 7 | config.yaml: | 8 | {{- toYaml .Values.customResourceState.config | nindent 4 }} 9 | {{- end }} 10 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/extra-manifests.yaml: -------------------------------------------------------------------------------- 1 | {{ range .Values.extraManifests }} 2 | --- 3 | {{ tpl (toYaml .) $ }} 4 | {{ end }} 5 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/kubeconfig-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeconfig.enabled -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | {{- include "kube-state-metrics.labels" . | indent 4 }} 9 | type: Opaque 10 | data: 11 | config: '{{ .Values.kubeconfig.secret }}' 12 | {{- end -}} 13 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | {{ if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} 3 | apiVersion: policy/v1 4 | {{- else -}} 5 | apiVersion: policy/v1beta1 6 | {{- end }} 7 | kind: PodDisruptionBudget 8 | metadata: 9 | name: {{ template "kube-state-metrics.fullname" . }} 10 | namespace: {{ template "kube-state-metrics.namespace" . }} 11 | labels: 12 | {{- include "kube-state-metrics.labels" . | indent 4 }} 13 | spec: 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 17 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 18 | {{- end -}} 19 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }} 6 | labels: 7 | {{- include "kube-state-metrics.labels" . 
| indent 4 }} 8 | {{- if .Values.podSecurityPolicy.annotations }} 9 | annotations: 10 | {{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | privileged: false 14 | volumes: 15 | - 'secret' 16 | {{- if .Values.podSecurityPolicy.additionalVolumes }} 17 | {{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} 18 | {{- end }} 19 | hostNetwork: false 20 | hostIPC: false 21 | hostPID: false 22 | runAsUser: 23 | rule: 'MustRunAsNonRoot' 24 | seLinux: 25 | rule: 'RunAsAny' 26 | supplementalGroups: 27 | rule: 'MustRunAs' 28 | ranges: 29 | # Forbid adding the root group. 30 | - min: 1 31 | max: 65535 32 | fsGroup: 33 | rule: 'MustRunAs' 34 | ranges: 35 | # Forbid adding the root group. 36 | - min: 1 37 | max: 65535 38 | readOnlyRootFilesystem: false 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: psp-{{ template "kube-state-metrics.fullname" . }} 8 | rules: 9 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 10 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 11 | - apiGroups: ['policy'] 12 | {{- else }} 13 | - apiGroups: ['extensions'] 14 | {{- end }} 15 | resources: ['podsecuritypolicies'] 16 | verbs: ['use'] 17 | resourceNames: 18 | - {{ template "kube-state-metrics.fullname" . }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: psp-{{ template "kube-state-metrics.fullname" . }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: psp-{{ template "kube-state-metrics.fullname" . }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 15 | namespace: {{ template "kube-state-metrics.namespace" . }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/rbac-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeRBACProxy.enabled}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }}-rbac-config 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | data: 8 | config-file.yaml: |+ 9 | authorization: 10 | resourceAttributes: 11 | namespace: {{ template "kube-state-metrics.namespace" . }} 12 | apiVersion: v1 13 | resource: services 14 | subresource: {{ template "kube-state-metrics.fullname" . 
}} 15 | name: {{ template "kube-state-metrics.fullname" . }} 16 | {{- end }} -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} 2 | {{- range (join "," $.Values.namespaces) | split "," }} 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: RoleBinding 6 | metadata: 7 | labels: 8 | {{- include "kube-state-metrics.labels" $ | indent 4 }} 9 | name: {{ template "kube-state-metrics.fullname" $ }} 10 | namespace: {{ . }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | {{- if (not $.Values.rbac.useExistingRole) }} 15 | name: {{ template "kube-state-metrics.fullname" $ }} 16 | {{- else }} 17 | name: {{ $.Values.rbac.useExistingRole }} 18 | {{- end }} 19 | subjects: 20 | - kind: ServiceAccount 21 | name: {{ template "kube-state-metrics.serviceAccountName" $ }} 22 | namespace: {{ template "kube-state-metrics.namespace" $ }} 23 | {{- end -}} 24 | {{- end -}} 25 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 8 | namespace: {{ template "kube-state-metrics.namespace" . }} 9 | {{- if .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{ toYaml .Values.serviceAccount.annotations | indent 4 }} 12 | {{- end }} 13 | imagePullSecrets: 14 | {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} 15 | {{- end -}} 16 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/stsdiscovery-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autosharding.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | {{- include "kube-state-metrics.labels" . | indent 4 }} 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - pods 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - apps 18 | resourceNames: 19 | - {{ template "kube-state-metrics.fullname" . }} 20 | resources: 21 | - statefulsets 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autosharding.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . 
}} 7 | labels: 8 | {{- include "kube-state-metrics.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 16 | namespace: {{ template "kube-state-metrics.namespace" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | ci/ 24 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: prometheus-node-exporter 3 | description: A Helm chart for prometheus node-exporter 4 | keywords: 5 | - node-exporter 6 | - prometheus 7 | - exporter 8 | type: application 9 | version: 4.45.2 10 | # renovate: github=prometheus/node_exporter 11 | appVersion: 1.9.1 12 | home: https://github.com/prometheus/node_exporter/ 13 | sources: 14 | - https://github.com/prometheus/node_exporter/ 15 | maintainers: 16 | - name: gianrubio 17 | email: gianrubio@gmail.com 18 | url: https://github.com/gianrubio 19 | - name: zanhsieh 20 | email: zanhsieh@gmail.com 21 | url: https://github.com/zanhsieh 22 | - name: zeritti 23 | email: rootsandtrees@posteo.de 24 | url: https://github.com/zeritti 25 | annotations: 26 | "artifacthub.io/license": Apache-2.0 27 | "artifacthub.io/links": | 28 | - name: Chart Source 29 | url: https://github.com/prometheus-community/helm-charts 30 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | labels: 7 | {{- include "prometheus-node-exporter.labels" . 
| nindent 4 }} 8 | rules: 9 | {{- if $.Values.kubeRBACProxy.enabled }} 10 | - apiGroups: [ "authentication.k8s.io" ] 11 | resources: 12 | - tokenreviews 13 | verbs: [ "create" ] 14 | - apiGroups: [ "authorization.k8s.io" ] 15 | resources: 16 | - subjectaccessreviews 17 | verbs: [ "create" ] 18 | {{- end }} 19 | {{- end -}} 20 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 7 | name: {{ template "prometheus-node-exporter.fullname" . }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | {{- if .Values.rbac.useExistingRole }} 12 | name: {{ .Values.rbac.useExistingRole }} 13 | {{- else }} 14 | name: {{ template "prometheus-node-exporter.fullname" . }} 15 | {{- end }} 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "prometheus-node-exporter.serviceAccountName" . }} 19 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 9 | subsets: 10 | - addresses: 11 | {{- range .Values.endpoints }} 12 | - ip: {{ . }} 13 | {{- end }} 14 | ports: 15 | - name: {{ .Values.service.portName }} 16 | port: 9100 17 | protocol: TCP 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/extra-manifests.yaml: -------------------------------------------------------------------------------- 1 | {{ range .Values.extraManifests }} 2 | --- 3 | {{ tpl . $ }} 4 | {{ end }} 5 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.networkPolicy.enabled }} 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} 9 | {{- with .Values.service.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | spec: 14 | ingress: 15 | {{- if .Values.networkPolicy.ingress }} 16 | {{- toYaml .Values.networkPolicy.ingress | nindent 4 }} 17 | {{- else }} 18 | - ports: 19 | - port: {{ .Values.service.port }} 20 | {{- end }} 21 | policyTypes: 22 | - Egress 23 | - Ingress 24 | podSelector: 25 | matchLabels: 26 | {{- include "prometheus-node-exporter.selectorLabels" . 
| nindent 6 }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: psp-{{ include "prometheus-node-exporter.fullname" . }} 6 | labels: 7 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 8 | rules: 9 | - apiGroups: ['extensions'] 10 | resources: ['podsecuritypolicies'] 11 | verbs: ['use'] 12 | resourceNames: 13 | - {{ include "prometheus-node-exporter.fullname" . }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: psp-{{ include "prometheus-node-exporter.fullname" . }} 6 | labels: 7 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: psp-{{ include "prometheus-node-exporter.fullname" . }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ include "prometheus-node-exporter.fullname" . }} 15 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/rbac-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeRBACProxy.enabled}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | data: 8 | config-file.yaml: |+ 9 | authorization: 10 | resourceAttributes: 11 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 12 | apiVersion: v1 13 | resource: services 14 | subresource: {{ template "prometheus-node-exporter.fullname" . }} 15 | name: {{ template "prometheus-node-exporter.fullname" . }} 16 | {{- end }} -------------------------------------------------------------------------------- /otelcollector/deploy/dependentcharts/prometheus-node-exporter/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.serviceAccountName" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . 
| nindent 4 }} 12 | {{- end }} 13 | automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} 14 | {{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} 15 | imagePullSecrets: 16 | {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} 17 | {{- end }} 18 | {{- end -}} 19 | -------------------------------------------------------------------------------- /otelcollector/deploy/example-custom-resources/pod-monitor/pod-monitor-reference-app-mtls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: azmonitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: prometheus-reference-app-job 5 | spec: 6 | labelLimit: 63 7 | labelNameLengthLimit: 511 8 | labelValueLengthLimit: 1023 9 | selector: 10 | matchLabels: 11 | app: prometheus-reference-app 12 | podMetricsEndpoints: 13 | - scheme: https 14 | tlsConfig: 15 | ca: 16 | secret: 17 | key: "client-cert.pem" 18 | name: "ama-metrics-mtls-secret" 19 | cert: 20 | secret: 21 | key: "client-cert.pem" 22 | name: "ama-metrics-mtls-secret" 23 | keySecret: 24 | key: "client-key.pem" 25 | name: "ama-metrics-mtls-secret" 26 | insecureSkipVerify: false 27 | - relabelings: 28 | - sourceLabels: [__meta_kubernetes_pod_label_app] 29 | action: keep 30 | regex: "prometheus-reference-app" 31 | - sourceLabels: [__meta_kubernetes_pod_node_name] 32 | action: replace 33 | regex: ('$$NODE_NAME$$') 34 | targetLabel: instance 35 | -------------------------------------------------------------------------------- /otelcollector/deploy/example-custom-resources/pod-monitor/pod-monitor-reference-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: azmonitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: prometheus-reference-app-job 5 | spec: 6 | labelLimit: 63 7 | labelNameLengthLimit: 511 8 | labelValueLengthLimit: 1023 9 | selector: 10 | matchLabels: 11 | app: prometheus-reference-app 12 | podMetricsEndpoints: 13 | - relabelings: 14 | - sourceLabels: [__meta_kubernetes_pod_label_app] 15 | action: keep 16 | regex: "prometheus-reference-app" 17 | - sourceLabels: [__meta_kubernetes_pod_node_name] 18 | action: replace 19 | regex: ('$$NODE_NAME$$') 20 | targetLabel: instance 21 | -------------------------------------------------------------------------------- /otelcollector/deploy/example-custom-resources/service-monitor/service-monitor-reference-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: azmonitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: prometheus-reference-app-monitor 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: prometheus-reference-app 9 | endpoints: 10 | - port: weather-app 11 | interval: 30s 12 | path: /metrics 13 | scheme: http 14 | - port: untyped-metrics 15 | interval: 30s 16 | path: /metrics 17 | scheme: http 18 | - port: python-client 19 | interval: 30s 20 | path: /metrics 21 | scheme: http 22 | -------------------------------------------------------------------------------- /otelcollector/deploy/retina/custom-files/network-observability-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: network-observability 5 | namespace: kube-system 6 | spec: 7 | ports: 8 | - name: retina 9 | protocol: TCP 10 
| port: 10093 11 | targetPort: 10093 12 | -------------------------------------------------------------------------------- /otelcollector/fluent-bit/fluent-bit-parsers.conf: -------------------------------------------------------------------------------- 1 | [PARSER] 2 | Name collector-parser 3 | Format json 4 | Time_Key ts 5 | Time_Keep On 6 | 7 | [PARSER] 8 | Name me-parser 9 | Format regex 10 | Regex ^(?