├── OSSMETADATA
├── project
├── build.properties
├── sbt-launch-1.0.0.jar
├── sbt
├── plugins.sbt
├── SonatypeSettings.scala
├── GitVersion.scala
└── License.scala
├── atlas-slotting
├── src
│ ├── scripts
│ │ └── requirements.txt
│ ├── test
│ │ └── resources
│ │ │ ├── SlottedInstanceDetails-1.json
│ │ │ ├── SlottedInstanceDetails-0.json
│ │ │ ├── SlottedInstanceDetails-2.json
│ │ │ ├── atlas_app-main-all-v001.json
│ │ │ ├── atlas_app-main-all-v002.json
│ │ │ └── atlas_app-main-none-v001.json
│ └── main
│ │ ├── resources
│ │ ├── log4j2.xml
│ │ └── application.conf
│ │ ├── scala
│ │ └── com
│ │ │ └── netflix
│ │ │ └── atlas
│ │ │ └── slotting
│ │ │ ├── Main.scala
│ │ │ ├── SlottingCache.scala
│ │ │ ├── Util.scala
│ │ │ └── AppConfiguration.scala
│ │ └── java
│ │ └── com
│ │ └── netflix
│ │ └── atlas
│ │ └── slotting
│ │ └── Gzip.java
└── .gitignore
├── iep-lwc-cloudwatch
├── README.md
└── src
│ └── main
│ ├── resources
│ ├── log4j2.xml
│ └── application.conf
│ └── scala
│ └── com
│ └── netflix
│ └── iep
│ └── lwc
│ ├── Main.scala
│ ├── AppConfiguration.scala
│ └── StatsApi.scala
├── atlas-cloudwatch
└── src
│ ├── main
│ ├── resources
│ │ ├── META-INF
│ │ │ └── services
│ │ │ │ └── com.google.inject.Module
│ │ ├── billing.conf
│ │ ├── s3.conf
│ │ ├── cloudwatch.conf
│ │ ├── vpn.conf
│ │ ├── events.conf
│ │ ├── trustedadvisor.conf
│ │ ├── s3-replication.conf
│ │ ├── sns.conf
│ │ ├── api-gateway.conf
│ │ ├── sqs.conf
│ │ ├── msk-serverless.conf
│ │ ├── efs.conf
│ │ └── route53.conf
│ ├── protobuf
│ │ └── CloudWatchMetric.proto
│ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── atlas
│ │ ├── cloudwatch
│ │ ├── Tagger.scala
│ │ ├── AwsAccountSupplier.scala
│ │ ├── package.scala
│ │ ├── FirehoseMetric.scala
│ │ ├── poller
│ │ │ └── MetricValue.scala
│ │ ├── MetricMetadata.scala
│ │ ├── ConfigAccountSupplier.scala
│ │ ├── NetflixTagger.scala
│ │ ├── MetricData.scala
│ │ └── CloudWatchRules.scala
│ │ ├── util
│ │ └── XXHasher.scala
│ │ └── webapi
│ │ └── RequestId.scala
│ └── test
│ ├── resources
│ ├── local.conf
│ ├── poll-mgr.conf
│ ├── log4j2.xml
│ └── application.conf
│ └── scala
│ └── com
│ └── netflix
│ └── atlas
│ ├── cloudwatch
│ └── ConfigAccountSupplierSuite.scala
│ └── util
│ └── XXHasherSuite.scala
├── atlas-druid
└── src
│ ├── test
│ └── resources
│ │ ├── groupByResponseHisto.json
│ │ ├── application.conf
│ │ ├── groupByResponseArray.json
│ │ └── groupByResponse.json
│ └── main
│ ├── resources
│ ├── log4j2.xml
│ └── application.conf
│ └── scala
│ └── com
│ └── netflix
│ └── atlas
│ └── druid
│ ├── Main.scala
│ ├── package.scala
│ ├── DruidSort.scala
│ ├── ReduceStepTimeSeq.scala
│ ├── DruidMetadataService.scala
│ ├── AppConfiguration.scala
│ └── ExplainApi.scala
├── iep-lwc-cloudwatch-model
├── README.md
└── src
│ └── main
│ └── scala
│ └── com
│ └── netflix
│ └── iep
│ └── lwc
│ └── fwd
│ └── cw
│ ├── ExpressionId.scala
│ ├── ConfigBinVersion.scala
│ ├── Report.scala
│ └── ClusterConfig.scala
├── iep-archaius
├── README.md
└── src
│ ├── main
│ ├── resources
│ │ ├── application.conf
│ │ └── log4j2.xml
│ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── iep
│ │ └── archaius
│ │ ├── Main.scala
│ │ ├── package.scala
│ │ ├── AppConfiguration.scala
│ │ ├── DynamoService.scala
│ │ ├── PropertiesLoader.scala
│ │ └── PropertiesContext.scala
│ └── test
│ └── scala
│ └── com
│ └── netflix
│ └── iep
│ └── archaius
│ └── MockDynamoDB.scala
├── .github
├── dependabot.yml
└── workflows
│ ├── pr.yml
│ ├── snapshot.yml
│ └── release.yml
├── atlas-aggregator
├── src
│ ├── test
│ │ ├── resources
│ │ │ └── application.conf
│ │ └── scala
│ │ │ └── com
│ │ │ └── netflix
│ │ │ └── atlas
│ │ │ └── aggregator
│ │ │ ├── AggrConfigSuite.scala
│ │ │ └── AppConfigurationSuite.scala
│ └── main
│ │ ├── resources
│ │ ├── log4j2.xml
│ │ └── application.conf
│ │ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── atlas
│ │ └── aggregator
│ │ ├── Main.scala
│ │ ├── Aggregator.scala
│ │ ├── AtlasAggregatorService.scala
│ │ ├── CaffeineCache.scala
│ │ ├── FailureMessage.scala
│ │ └── AppConfiguration.scala
└── README.md
├── iep-lwc-bridge
├── README.md
└── src
│ └── main
│ ├── resources
│ ├── log4j2.xml
│ └── application.conf
│ └── scala
│ └── com
│ └── netflix
│ └── iep
│ └── lwc
│ ├── Main.scala
│ ├── package.scala
│ ├── StatsApi.scala
│ └── AppConfiguration.scala
├── atlas-stream
└── src
│ ├── main
│ ├── resources
│ │ └── application.conf
│ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── atlas
│ │ └── stream
│ │ ├── Main.scala
│ │ └── AppConfiguration.scala
│ └── test
│ ├── resources
│ └── log4j2.xml
│ └── scala
│ └── com
│ └── netflix
│ └── atlas
│ └── stream
│ └── EvalFlowSuite.scala
├── iep-lwc-fwding-admin
├── README.md
├── schema
│ ├── ExpressionDetails.json
│ └── README.md
└── src
│ ├── main
│ ├── resources
│ │ ├── log4j2.xml
│ │ ├── application.conf
│ │ └── cw-fwding-cfg-schema.json
│ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── iep
│ │ └── lwc
│ │ └── fwd
│ │ └── admin
│ │ ├── Main.scala
│ │ ├── Timer.scala
│ │ ├── SchemaValidation.scala
│ │ ├── ExprInterpreter.scala
│ │ └── AppConfiguration.scala
│ └── test
│ └── scala
│ └── com
│ └── netflix
│ └── iep
│ └── lwc
│ └── fwd
│ └── admin
│ ├── TestAssertions.scala
│ ├── ExpressionDetailsDaoTestImpl.scala
│ ├── ScalingPoliciesTestImpl.scala
│ ├── CwForwardingConfigSuite.scala
│ ├── ExprInterpreterSuite.scala
│ ├── TimerSuite.scala
│ └── ValidationSuite.scala
├── iep-lwc-loadgen
├── src
│ ├── main
│ │ ├── resources
│ │ │ ├── application.conf
│ │ │ └── log4j2.xml
│ │ └── scala
│ │ │ └── com
│ │ │ └── netflix
│ │ │ └── iep
│ │ │ └── loadgen
│ │ │ ├── Main.scala
│ │ │ └── AppConfiguration.scala
│ └── test
│ │ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── iep
│ │ └── loadgen
│ │ └── LoadGenServiceSuite.scala
└── README.md
├── .gitignore
├── atlas-persistence
├── README.md
└── src
│ ├── main
│ ├── resources
│ │ ├── datapoint.avsc
│ │ ├── log4j2.xml
│ │ └── application.conf
│ └── scala
│ │ └── com
│ │ └── netflix
│ │ └── atlas
│ │ └── persistence
│ │ ├── Main.scala
│ │ ├── FileUtil.scala
│ │ ├── AppConfiguration.scala
│ │ ├── S3CopyUtils.scala
│ │ └── PersistenceApi.scala
│ └── test
│ └── scala
│ └── com
│ └── netflix
│ └── atlas
│ └── persistence
│ ├── S3CopySinkSuite.scala
│ └── S3CopyUtilsSuite.scala
├── README.md
├── .scalafmt.conf
└── Makefile
/OSSMETADATA:
--------------------------------------------------------------------------------
1 | osslifecycle=active
2 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.11.7
2 |
--------------------------------------------------------------------------------
/atlas-slotting/src/scripts/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.8.2
2 | requests==2.32.4
3 |
--------------------------------------------------------------------------------
/atlas-slotting/.gitignore:
--------------------------------------------------------------------------------
1 | env.bash
2 | setup-localdev.sh
3 | worksheet.sc
4 | project/
5 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch/README.md:
--------------------------------------------------------------------------------
1 |
2 | Experiment for forwarding the results of LWC expressions to CloudWatch.
3 |
4 |
--------------------------------------------------------------------------------
/project/sbt-launch-1.0.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Netflix-Skunkworks/iep-apps/main/project/sbt-launch-1.0.0.jar
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/META-INF/services/com.google.inject.Module:
--------------------------------------------------------------------------------
1 | com.netflix.atlas.spring.CloudWatchConfiguration
--------------------------------------------------------------------------------
/atlas-druid/src/test/resources/groupByResponseHisto.json:
--------------------------------------------------------------------------------
1 | [
2 | [1553786700000,null],
3 | [1553786700000,{"0": 0, "2": 0, "42": 1, "43": 1, "44": 1}]
4 | ]
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch-model/README.md:
--------------------------------------------------------------------------------
1 | ## LWC CloudWatch Model
2 |
3 | Data model classes that are used in `iep-lwc-cloudwatch` and
4 | `iep-lwc-fwding-admin`
5 |
--------------------------------------------------------------------------------
/atlas-druid/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | atlas {
2 | // URI for the druid service
3 | druid {
4 | uri = "http://localhost:7103/druid/v2"
5 | }
6 | }
--------------------------------------------------------------------------------
/iep-archaius/README.md:
--------------------------------------------------------------------------------
1 |
2 | Sample property service that can be used with
3 | [iep-module-archaius2](https://github.com/Netflix/iep/tree/master/iep-module-archaius2).
4 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | target-branch: "main"
6 | schedule:
7 | interval: "monthly"
8 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | netflix.atlas.aggr.registry.atlas.lwc.step = "PT1M"
3 |
4 | // Used for atlas.aggr tag in tests
5 | netflix.iep.env.instance-id = "i-123"
--------------------------------------------------------------------------------
/iep-lwc-bridge/README.md:
--------------------------------------------------------------------------------
1 |
2 | Bridge from the Publish API to the LWC API. The goal is to allow existing clients
3 | that are sending data via the Publish API to be accessible via LWC.
4 |
5 |
6 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/test/resources/local.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas.poller {
3 |
4 | frequency = 10 s
5 |
6 | pollers = [
7 | {
8 | name = "cloudwatch"
9 | class = "com.netflix.atlas.cloudwatch.CloudWatchPoller"
10 | }
11 | ]
12 | }
--------------------------------------------------------------------------------
/atlas-druid/src/test/resources/groupByResponseArray.json:
--------------------------------------------------------------------------------
1 | [
2 | [1553786700000,null,0],
3 | [1553786700000,"",0],
4 | [1553786700000,"T0000",0],
5 | [1553786700000,"T0002",0],
6 | [1553786700000,"T002A",1],
7 | [1553786700000,"T002B",1],
8 | [1553786700000,"T002C",1]
9 | ]
--------------------------------------------------------------------------------
/atlas-aggregator/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Description
3 |
4 | > :warning: Experimental
5 |
6 | Prototype for a service that receives delta updates from clients and applies them to a local
7 | Atlas registry. This can be used with short-lived systems like FaaS where the state cannot be
8 | maintained locally for the full step interval.
--------------------------------------------------------------------------------
/atlas-slotting/src/test/resources/SlottedInstanceDetails-1.json:
--------------------------------------------------------------------------------
1 | {
2 | "instanceId": "i-002",
3 | "privateIpAddress": "192.168.1.2",
4 | "publicIpAddress": "10.0.0.2",
5 | "slot": 1,
6 | "launchTime": 1555430010290,
7 | "imageId": "ami-001",
8 | "instanceType": "r4.large",
9 | "availabilityZone": "us-west-2b",
10 | "lifecycleState": "InService"
11 | }
--------------------------------------------------------------------------------
/atlas-stream/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas.pekko {
3 | api-endpoints = [
4 | "com.netflix.atlas.pekko.ConfigApi",
5 | "com.netflix.atlas.pekko.HealthcheckApi",
6 | "com.netflix.atlas.stream.StreamApi"
7 | ]
8 | }
9 |
10 | atlas.stream {
11 | eval-service.queue-size = 10000
12 | max-datasources-per-session = 100
13 | max-datasources-total = 2000
14 | }
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/README.md:
--------------------------------------------------------------------------------
1 | ## LWC Metrics Forwarding Admin Service
2 |
3 | Service for managing the forwarding configurations.
4 |
5 | - Expression validation API that will be hooked into the Configbin Create/Update
6 | API.
7 | - Mark the expressions that are not forwarding any data and the ones with
8 | no scaling policy attached.
9 | - Have support for removing the flagged expressions.
10 |
--------------------------------------------------------------------------------
/atlas-slotting/src/test/resources/SlottedInstanceDetails-0.json:
--------------------------------------------------------------------------------
1 | {
2 | "instanceId": "i-001",
3 | "ipv6Address": "0:0:0:0:0:FFFF:C0A8:0101",
4 | "privateIpAddress": "192.168.1.1",
5 | "publicIpAddress": "10.0.0.1",
6 | "slot": 0,
7 | "launchTime": 1555430010290,
8 | "imageId": "ami-001",
9 | "instanceType": "r4.large",
10 | "availabilityZone": "us-west-2b",
11 | "lifecycleState": "InService"
12 | }
--------------------------------------------------------------------------------
/atlas-slotting/src/test/resources/SlottedInstanceDetails-2.json:
--------------------------------------------------------------------------------
1 | {
2 | "instanceId": "i-003",
3 | "ipv6Address": "0:0:0:0:0:FFFF:C0A8:0103",
4 | "privateIpAddress": "192.168.1.3",
5 | "publicIpAddress": "10.0.0.3",
6 | "slot": 2,
7 | "launchTime": 1555430010290,
8 | "imageId": "ami-001",
9 | "instanceType": "r4.large",
10 | "availabilityZone": "us-west-2b",
11 | "lifecycleState": "InService"
12 | }
--------------------------------------------------------------------------------
/iep-lwc-loadgen/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | iep.lwc.loadgen {
3 | step = 60s
4 | uris = [
5 | ]
6 | }
7 |
8 | atlas {
9 | pekko {
10 | api-endpoints = [
11 | "com.netflix.atlas.pekko.ConfigApi",
12 | "com.netflix.atlas.pekko.HealthcheckApi"
13 | ]
14 | }
15 | }
16 |
17 | // User specific configuration with settings for an internal deployment
18 | include "custom.conf"
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Tmp files created by vim
3 | *.swp
4 |
5 | # Compiled sources
6 | *.class
7 |
8 | # Anything in target dirs
9 | build
10 | gh-pages-build
11 | target
12 |
13 | # Ivy caches
14 | .bsp
15 | .gradle
16 | .ivy2
17 | .ivy2.cache
18 | .sbt
19 |
20 | # Intellij
21 | *.iml
22 | *.ipr
23 | *.iws
24 |
25 | # anything in .idea dirs
26 | .idea
27 | .idea_modules
28 | out
29 |
30 | # python virtualenv
31 | venv
32 |
--------------------------------------------------------------------------------
/atlas-persistence/README.md:
--------------------------------------------------------------------------------
1 | ## Atlas Persistence
2 | Receive Atlas data points, batch to local files and save them to S3.
3 |
4 | #### Java code generation from avro schema
5 | - Avro code generation runs as a dependency of the sbt compile step, so it will be executed by:
6 | ```sbt compile```
7 | - You can also only run code generation with: ```sbt avroGenerate```
8 | - The generated Java Source Code can be found at: ```atlas-persistence/target/scala-*/src_managed```
9 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/test/resources/poll-mgr.conf:
--------------------------------------------------------------------------------
1 | atlas.poller {
2 |
3 | // Set to a large value because we don't want it running during tests
4 | frequency = 60 minutes
5 |
6 | sink = {
7 | class = "com.netflix.atlas.poller.TestActor"
8 | url = "http://localhost:7101/api/v1/publish"
9 | send-ack = true
10 | }
11 |
12 | pollers = [
13 | {
14 | name = "test"
15 | class = "com.netflix.atlas.poller.TestActor"
16 | }
17 | ]
18 | }
--------------------------------------------------------------------------------
/atlas-persistence/src/main/resources/datapoint.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "com.netflix.atlas.persistence",
3 | "type": "record",
4 | "name": "AvroDatapoint",
5 | "fields": [
6 | {
7 | "name": "tags",
8 | "type": {
9 | "type": "map",
10 | "values": "string"
11 | }
12 | },
13 | {
14 | "name": "timestamp",
15 | "type": "long"
16 | },
17 | {
18 | "name": "value",
19 | "type": "double"
20 | }
21 | ]
22 | }
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/schema/ExpressionDetails.json:
--------------------------------------------------------------------------------
1 | {
2 | "TableName": "iep.lwc.fwd.cw.ExpressionDetails",
3 |
4 | "KeySchema": [
5 | {
6 | "KeyType": "HASH",
7 | "AttributeName": "ExpressionId"
8 | }
9 | ],
10 |
11 | "AttributeDefinitions": [
12 | {
13 | "AttributeName": "ExpressionId",
14 | "AttributeType": "S"
15 | }
16 | ],
17 |
18 | "ProvisionedThroughput": {
19 | "WriteCapacityUnits": 10,
20 | "ReadCapacityUnits": 10
21 | }
22 | }
--------------------------------------------------------------------------------
/iep-archaius/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | netflix.iep.archaius {
3 | use-dynamic = false
4 | sync-init = false
5 | table = "atlas.deploy.dynamic.properties"
6 | }
7 |
8 | atlas.pekko {
9 | api-endpoints = [
10 | "com.netflix.atlas.pekko.ConfigApi",
11 | "com.netflix.atlas.pekko.HealthcheckApi",
12 | "com.netflix.iep.archaius.PropertiesApi"
13 | ]
14 |
15 | actors = ${?atlas.pekko.actors} [
16 | {
17 | name = "props-refresh"
18 | class = "com.netflix.iep.archaius.PropertiesLoader"
19 | }
20 | ]
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/project/sbt:
--------------------------------------------------------------------------------
1 | # This script is used to launch sbt on developer workstations.
2 |
3 | # Additional options to add in when calling java
4 | OPTIONS=""
5 |
6 | # On jenkins the colors should be disabled for the console log
7 | [ -n "$BUILD_ID" ] && OPTIONS="$OPTIONS -Dsbt.log.noformat=true"
8 |
9 | java \
10 | -XX:+UseG1GC \
11 | -Xms2g \
12 | -Xmx2g \
13 | -XX:ReservedCodeCacheSize=128m \
14 | -XX:+UseCodeCacheFlushing \
15 | -Dsbt.boot.directory=${WORKSPACE:-$HOME}/.sbt \
16 | -Dsbt.ivy.home=${WORKSPACE:-$HOME}/.ivy2 \
17 | -Dsbt.gigahorse=false \
18 | $OPTIONS \
19 | -jar `dirname $0`/sbt-launch-1.0.0.jar "$@"
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | [](https://github.com/Netflix-Skunkworks/iep-apps/actions/workflows/snapshot.yml) [](https://github.com/Netflix-Skunkworks/iep-apps/actions/workflows/release.yml)
3 |
4 | Small example apps using Netflix Insight libraries from the
5 | [Spectator](https://github.com/Netflix/spectator),
6 | [Atlas](https://github.com/Netflix/atlas), and
7 | [IEP](https://github.com/Netflix/iep) projects. See the readme
8 | for individual sub projects to get more information.
9 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/schema/README.md:
--------------------------------------------------------------------------------
1 | ```text
2 | iep.lwc.fwd.cw.ExpressionDetails {
3 |
4 | // use ExpressionId
5 | partition key: ExpressionId
6 |
7 | // Config key and expression
8 | ExpressionId: string
9 |
10 | // report timestamp
11 | Timestamp: number
12 |
13 | // FwdMetricInfo
14 | FwdMetricInfo: string
15 |
16 | // Throwable
17 | Error: string
18 |
19 | // name -> timestamp
20 | Events: Map[string, number]
21 |
22 | // scaling policy
23 | ScalingPolicy: string
24 | }
25 | ```
26 |
27 | ```
28 | aws dynamodb create-table --region us-east-1 --cli-input-json file://ExpressionDetails.json
29 | ```
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%C{1}]: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/iep-lwc-loadgen/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/billing.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/monitor_estimated_charges_with_cloudwatch.html
6 | // Not enabled by default.
7 | billing = {
8 | namespace = "AWS/Billing"
9 | period = 6h
10 | end-period-offset = 1
11 | period-count = 2
12 |
13 | dimensions = [
14 | "LinkedAccount",
15 | "ServiceName",
16 | "Currency"
17 | ]
18 |
19 | metrics = [
20 | {
21 | name = "EstimatedCharges"
22 | alias = "aws.billing.estimatedMonthlyCharge"
23 | conversion = "sum"
24 | }
25 | ]
26 | }
27 | }
28 | }
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/s3.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // https://docs.aws.amazon.com/AmazonS3/latest/userguide/metrics-dimensions.html
6 | s3 = {
7 | namespace = "AWS/S3"
8 | period = 1d
9 | end-period-offset = 4
10 |
11 | dimensions = [
12 | "BucketName",
13 | "StorageType"
14 | ]
15 |
16 | metrics = [
17 | {
18 | name = "BucketSizeBytes"
19 | alias = "aws.s3.bucketSizeBytes"
20 | conversion = "max"
21 | },
22 | {
23 | name = "NumberOfObjects"
24 | alias = "aws.s3.numberOfObjects"
25 | conversion = "max"
26 | }
27 | ]
28 | }
29 | }
30 | }
--------------------------------------------------------------------------------
/atlas-slotting/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.github.sbt" % "sbt-pgp" % "2.3.1")
2 | addSbtPlugin("com.github.sbt" % "sbt-release" % "1.4.0")
3 | addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.4.7")
4 | addSbtPlugin("com.github.sbt" % "sbt-git" % "2.1.0")
5 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.12")
6 |
7 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6")
8 |
9 | // for compiling protobuf in the Cloud Watch module
10 | addSbtPlugin("com.github.sbt" % "sbt-protobuf" % "0.8.0")
11 |
12 | // Convenient helpers, not required
13 | addSbtPlugin("com.timushev.sbt" % "sbt-updates" % "0.6.4")
14 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/cloudwatch.conf:
--------------------------------------------------------------------------------
1 | atlas {
2 | cloudwatch {
3 |
4 | # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-monitoring.html
5 | cw-metric-streams = {
6 | namespace = "AWS/CloudWatch/MetricStreams"
7 | period = 1m
8 | end-period-offset = 3
9 |
10 | dimensions = [
11 | "MetricStreamName"
12 | ]
13 |
14 | metrics = [
15 | {
16 | name = "TotalMetricUpdate"
17 | alias = "aws.cw.stream.metricUpdates"
18 | conversion = "sum,rate"
19 | },
20 | {
21 | name = "PublishErrorRate"
22 | alias = "aws.cw.stream.publishErrors"
23 | conversion = "sum,rate"
24 | }
25 | ]
26 | }
27 | }
28 | }
--------------------------------------------------------------------------------
/atlas-stream/src/test/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/iep-archaius/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/iep-lwc-bridge/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/test/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | %d{yyyy-MM-dd'T'HH:mm:ss.SSS} %-5level [%t] %class: %msg%n
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/iep-lwc-bridge/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | netflix.iep.lwc.bridge {
3 | config-uri = "http://localhost:7101/lwc/api/v1/expressions"
4 | eval-uri = "http://localhost:7101/lwc/api/v1/evaluate"
5 |
6 | logging {
7 | // Subscription ids to log in detail for better debugging
8 | subscriptions = []
9 | }
10 | }
11 |
12 | atlas {
13 | pekko {
14 | api-endpoints = [
15 | "com.netflix.atlas.pekko.ConfigApi",
16 | "com.netflix.atlas.pekko.HealthcheckApi",
17 | "com.netflix.iep.lwc.BridgeApi",
18 | "com.netflix.iep.lwc.StatsApi"
19 | ]
20 | }
21 |
22 | webapi.publish {
23 | // This is just a pass through, do not intern the values...
24 | intern-while-parsing = false
25 | }
26 | }
27 |
28 | // User specific configuration with settings for an internal deployment
29 | include "custom.conf"
30 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch-model/src/main/scala/com/netflix/iep/lwc/fwd/cw/ExpressionId.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.cw
17 |
18 | case class ExpressionId(key: String, expression: ForwardingExpression)
19 |
--------------------------------------------------------------------------------
/iep-lwc-bridge/src/main/scala/com/netflix/iep/lwc/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc
17 |
18 | object Main {
19 |
20 | def main(args: Array[String]): Unit = {
21 | com.netflix.iep.spring.Main.main(args)
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.druid
17 |
object Main {

  /**
    * Entry point for the atlas-druid service; startup is handled entirely by
    * the common Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch/src/main/scala/com/netflix/iep/lwc/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc
17 |
object Main {

  /**
    * Entry point for the iep-lwc-cloudwatch service; delegates directly to the
    * shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/atlas-stream/src/main/scala/com/netflix/atlas/stream/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.stream
17 |
object Main {

  /**
    * Entry point for the atlas-stream service; delegates directly to the
    * shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/iep-archaius/src/main/scala/com/netflix/iep/archaius/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.archaius
17 |
object Main {

  /**
    * Entry point for the iep-archaius service, delegating to the shared
    * Spring-based launcher.
    *
    * Fix: every other service Main in this repository invokes
    * `com.netflix.iep.spring.Main.main(args)`; this one called `run(args)`,
    * which is inconsistent with how the launcher is used elsewhere. Using
    * `main` keeps startup behavior uniform across services.
    */
  def main(args: Array[String]): Unit = {
    com.netflix.iep.spring.Main.main(args)
  }
}
24 |
--------------------------------------------------------------------------------
/iep-lwc-loadgen/src/main/scala/com/netflix/iep/loadgen/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.loadgen
17 |
object Main {

  /**
    * Entry point for the iep-lwc-loadgen service; delegates directly to the
    * shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/atlas-slotting/src/main/scala/com/netflix/atlas/slotting/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.slotting
17 |
object Main {

  /**
    * Entry point for the atlas-slotting service; delegates directly to the
    * shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/scala/com/netflix/atlas/aggregator/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
object Main {

  /**
    * Entry point for the atlas-aggregator service; delegates directly to the
    * shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/scala/com/netflix/atlas/persistence/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
object Main {

  /**
    * Entry point for the atlas-persistence service; delegates directly to the
    * shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/scala/com/netflix/iep/lwc/fwd/admin/Main.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
object Main {

  /**
    * Entry point for the iep-lwc-fwding-admin service; delegates directly to
    * the shared Spring-based launcher.
    */
  def main(args: Array[String]): Unit = com.netflix.iep.spring.Main.main(args)
}
24 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch-model/src/main/scala/com/netflix/iep/lwc/fwd/cw/ConfigBinVersion.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.cw
17 |
/**
  * Version metadata for a forwarding config bin.
  *
  * @param ts
  *     Timestamp of the version. NOTE(review): units are not visible in this
  *     file — presumably epoch millis; confirm against the producer.
  * @param hash
  *     Hash identifying the config contents for this version.
  * @param user
  *     Optional user associated with the change.
  * @param comment
  *     Optional free-form comment describing the change.
  */
case class ConfigBinVersion(
  ts: Long,
  hash: String,
  user: Option[String] = None,
  comment: Option[String] = None
)
24 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/scala/com/netflix/atlas/aggregator/Aggregator.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.netflix.spectator.api.Id
19 |
/**
  * Sink for measurements identified by a Spectator [[Id]]. The aggregation
  * semantics (sum vs max) are implied by the method names; confirm exact
  * behavior with the concrete implementations.
  */
trait Aggregator {

  /** Accumulate `value` into the aggregate for `id` (additive semantics implied by name). */
  def add(id: Id, value: Double): Unit
  /** Update the aggregate for `id` keeping the maximum observed value (implied by name). */
  def max(id: Id, value: Double): Unit
}
25 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/protobuf/CloudWatchMetric.proto:
--------------------------------------------------------------------------------
// Schema for CloudWatch metric data cached by the atlas-cloudwatch processor.
syntax = "proto2";

package com.netflix.atlas.cloudwatch;

option java_multiple_files = true;
option java_package = "com.netflix.atlas.cloudwatch";
option java_outer_classname = "CloudWatchMetric";

// A single CloudWatch dimension (name/value pair) attached to a metric.
message CloudWatchDimension {
  required string name = 1;
  required string value = 2;
}

// One datapoint for a metric. The statistic fields mirror CloudWatch's
// sum/min/max/count aggregates and are optional; only the timestamp is
// required.
message CloudWatchValue {
  required int64 timestamp = 1; // epoch millis
  optional double sum = 2;
  optional double min = 3;
  optional double max = 4;
  optional double count = 5;
  optional int64 updateTimestamp = 6; // 60s normalized epoch millis
  optional bool published = 7;
}

// Cache entry identified by namespace, metric name, unit and dimensions,
// holding the recent datapoints for that series.
message CloudWatchCacheEntry {
  required string namespace = 1;
  required string metric = 2;
  required string unit = 3;
  repeated CloudWatchDimension dimensions = 4;
  repeated CloudWatchValue data = 5;
}
--------------------------------------------------------------------------------
/.github/workflows/pr.yml:
--------------------------------------------------------------------------------
# CI workflow: builds and tests the project for every pull request.
name: PR Build

on: [pull_request]

jobs:
  # Compile and run the full test suite for each Java/Scala combination.
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        java: [25]
        scala: [3.7.4]
    steps:
      - uses: actions/checkout@v5
      - name: Set up JDK ${{ matrix.java }}
        uses: actions/setup-java@v5
        with:
          java-version: ${{ matrix.java }}
          distribution: 'zulu'
          cache: 'sbt'
      - name: Build
        # stdin redirected from /dev/null so sbt cannot block waiting for input
        run: cat /dev/null | project/sbt ++${{ matrix.scala }} clean test
  # Static checks: license headers and scalafmt formatting.
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Set up JDK 25
        uses: actions/setup-java@v5
        with:
          java-version: 25
          distribution: 'zulu'
          cache: 'sbt'
      - name: Build
        run: cat /dev/null | project/sbt checkLicenseHeaders scalafmtCheckAll
--------------------------------------------------------------------------------
/atlas-slotting/src/main/scala/com/netflix/atlas/slotting/SlottingCache.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.slotting
17 |
18 | import scala.collection.immutable.SortedMap
19 |
/**
  * Holds the latest snapshot of slotted ASG details, keyed by ASG name.
  */
class SlottingCache {

  // @volatile ensures readers see the most recently published map. Updates are
  // an atomic swap of an immutable SortedMap, so no further synchronization is
  // needed. NOTE(review): presumably written by a periodic cache-load task and
  // read by API handlers — confirm against the writer.
  @volatile
  var asgs: SortedMap[String, SlottedAsgDetails] = SortedMap.empty[String, SlottedAsgDetails]
}
25 |
--------------------------------------------------------------------------------
/atlas-slotting/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | atlas.pekko {
2 | api-endpoints = [
3 | "com.netflix.atlas.pekko.ConfigApi",
4 | "com.netflix.atlas.pekko.HealthcheckApi",
5 | "com.netflix.atlas.slotting.SlottingApi"
6 | ]
7 | }
8 |
9 | aws {
10 | autoscaling {
11 | crawl-interval = 60 s
12 | page-size = 100
13 | }
14 |
15 | ec2 {
16 | crawl-interval = 60 s
17 | page-size = 1000
18 | }
19 |
20 | dynamodb {
21 | table-name = "atlas_slotting-"${netflix.iep.env.stack}
22 |
23 | read-capacity {
24 | default = 15
25 | # add $env.$region scoped values here
26 | }
27 |
28 | write-capacity {
29 | default = 15
30 | # add $env.$region scoped values here
31 | }
32 | }
33 | }
34 |
35 | slotting {
36 | app-names = [
37 | ]
38 |
39 | cache-load-interval = 30 s
40 | cutoff-interval = 7 d
41 | janitor-interval = 24 h
42 | }
43 |
44 | pekko.http.caching.lfu-cache {
45 | time-to-live = 10s
46 | time-to-idle = 10s
47 | }
48 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = 3.7.4
2 | runner.dialect = Scala213Source3
3 |
4 | style = defaultWithAlign
5 |
6 | align.openParenCallSite = false
7 | align.openParenDefnSite = false
8 | align.tokens = [{code = "->"}, {code = "<-"}, {code = "=>", owner = "Case"}]
9 |
10 | continuationIndent.callSite = 2
11 | continuationIndent.defnSite = 2
12 |
13 | danglingParentheses.preset = true
14 |
15 | docstrings.style = keep
16 |
17 | indentOperator = spray
18 | indentOperator.excludeRegex = "^(&&|\\|\\||~)$"
19 | indentOperator.exemptScope = all
20 |
21 | literals.hexDigits = Upper
22 |
23 | maxColumn = 100
24 |
25 | newlines.afterCurlyLambdaParams = keep
26 | newlines.beforeCurlyLambdaParams = multilineWithCaseOnly
27 | newlines.inInterpolation = oneline
28 | newlines.topLevelBodyIfMinStatements = [before]
29 | newlines.topLevelStatementBlankLines = [
30 | { blanks { before = 1, after = 0 } }
31 | ]
32 |
33 | project.excludeFilters = [".*\\.sbt"]
34 |
35 | rewrite.rules = [RedundantParens, ExpandImportSelectors, AvoidInfix]
36 |
37 | spaces.inImportCurlyBraces = false
38 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/TestAssertions.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import munit.Assertions
19 |
/** Mixin with shared assertion helpers for the fwding-admin test suites. */
trait TestAssertions extends Assertions {

  /**
    * Asserts that evaluating `f` throws an IllegalArgumentException whose
    * message contains `expectedMsg`.
    */
  def assertFailure(f: => Any, expectedMsg: String): Unit =
    assert(intercept[IllegalArgumentException](f).getMessage.contains(expectedMsg))
}
27 |
--------------------------------------------------------------------------------
/iep-archaius/src/main/scala/com/netflix/iep/archaius/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep
17 |
18 | import software.amazon.awssdk.services.dynamodb.model.AttributeValue
19 |
/**
 * Helper types for working with properties.
 */
package object archaius {

  // Raw DynamoDB item: attribute name -> AttributeValue.
  type AttrMap = java.util.Map[String, AttributeValue]
  // List of raw DynamoDB items, e.g. the result of a scan.
  type Items = java.util.List[AttrMap]
  // Parsed property entries from PropertiesApi.
  type PropList = List[PropertiesApi.Property]
}
29 |
--------------------------------------------------------------------------------
/atlas-druid/src/test/resources/groupByResponse.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "version": "v1",
4 | "timestamp": "2019-03-28T15:25:00.000Z",
5 | "event": {
6 | "value": 0,
7 | "percentile": null
8 | }
9 | },
10 | {
11 | "version": "v1",
12 | "timestamp": "2019-03-28T15:25:00.000Z",
13 | "event": {
14 | "value": 0,
15 | "percentile": "T0000"
16 | }
17 | },
18 | {
19 | "version": "v1",
20 | "timestamp": "2019-03-28T15:25:00.000Z",
21 | "event": {
22 | "value": 0,
23 | "percentile": "T0002"
24 | }
25 | },
26 | {
27 | "version": "v1",
28 | "timestamp": "2019-03-28T15:25:00.000Z",
29 | "event": {
30 | "value": 1,
31 | "percentile": "T002A"
32 | }
33 | },
34 | {
35 | "version": "v1",
36 | "timestamp": "2019-03-28T15:25:00.000Z",
37 | "event": {
38 | "value": 1,
39 | "percentile": "T002B"
40 | }
41 | },
42 | {
43 | "version": "v1",
44 | "timestamp": "2019-03-28T15:25:00.000Z",
45 | "event": {
46 | "value": 1,
47 | "percentile": "T002C"
48 | }
49 | }
50 | ]
--------------------------------------------------------------------------------
/iep-lwc-bridge/src/main/scala/com/netflix/iep/lwc/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep
17 |
18 | import com.netflix.spectator.atlas.impl.EvalPayload
19 | import com.netflix.spectator.atlas.impl.Subscription
20 |
/** Type aliases for the Spectator eval/subscription types used by the bridge. */
package object lwc {

  // Java collection aliases for interop with the Spectator atlas impl classes.
  type SubscriptionList = java.util.List[Subscription]
  type JMap = java.util.Map[String, String]

  type MetricList = java.util.ArrayList[EvalPayload.Metric]
  type MessageList = java.util.ArrayList[EvalPayload.Message]
}
29 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/vpn.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // Metrics for the site-to-site vpn service
6 | // https://docs.aws.amazon.com/vpn/latest/s2svpn/monitoring-cloudwatch-vpn.html
7 | vpn = {
8 | namespace = "AWS/VPN"
9 | period = 1m
10 | end-period-offset = 4
11 |
12 | dimensions = [
13 | "VpnId",
14 | ]
15 |
16 | metrics = [
17 | {
18 | name = "TunnelState"
19 | alias = "aws.vpn.tunnelState"
20 | conversion = "max"
21 | },
22 | {
23 | name = "TunnelDataIn"
24 | alias = "aws.vpn.tunnelData"
25 | conversion = "sum,rate"
26 | tags = [
27 | {
28 | key = "id"
29 | value = "in"
30 | }
31 | ]
32 | },
33 | {
34 | name = "TunnelDataOut"
35 | alias = "aws.vpn.tunnelData"
36 | conversion = "sum,rate"
37 | tags = [
38 | {
39 | key = "id"
40 | value = "out"
41 | }
42 | ]
43 | },
44 | ]
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas
17 |
18 | import org.apache.pekko.http.scaladsl.model.HttpRequest
19 | import org.apache.pekko.http.scaladsl.model.HttpResponse
20 | import org.apache.pekko.stream.scaladsl.Flow
21 | import org.apache.pekko.NotUsed
22 | import com.netflix.atlas.pekko.AccessLogger
23 |
24 | import scala.util.Try
25 |
package object druid {

  // Pekko HTTP client flow that carries an AccessLogger alongside each request
  // so the response (or failure) can be correlated back for access logging.
  type HttpClient = Flow[(HttpRequest, AccessLogger), (Try[HttpResponse], AccessLogger), NotUsed]
}
30 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/events.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cwe-metricscollected.html
6 | events = {
7 | namespace = "AWS/Events"
8 | period = 1m
9 | end-period-offset = 25
10 |
11 | dimensions = [
12 | "RuleName"
13 | ]
14 |
15 | metrics = [
16 | {
17 | name = "DeadLetterInvocations"
18 | alias = "aws.events.deadLetterInvocations"
19 | conversion = "sum,rate"
20 | },
21 | {
22 | name = "Invocations"
23 | alias = "aws.events.invocations"
24 | conversion = "sum,rate"
25 | },
26 | {
27 | name = "FailedInvocations"
28 | alias = "aws.events.failedInvocations"
29 | conversion = "sum,rate"
30 | },
31 | {
32 | name = "TriggeredRules"
33 | alias = "aws.events.rulesTriggered"
34 | conversion = "sum,rate"
35 | },
36 | {
37 | name = "ThrottledRules"
38 | alias = "aws.events.rulesThrottled"
39 | conversion = "sum,rate"
40 | }
41 | ]
42 | }
43 | }
44 | }
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/trustedadvisor.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/trusted-advisor-metricscollected.html
6 | // Mapping for service limit metrics only. It isn't clear the others are of any use to us
7 | // at this time.
8 | trustedadvisor = {
9 | namespace = "AWS/TrustedAdvisor"
10 |
11 | // Update frequency seems to be ~1h15m, but isn't as regular as most other cloudwatch
12 | // data. Using 2h to provide some buffer.
13 | period = 2h
14 | end-period-offset = 1
15 | period-count = 2
16 |
17 | // Note, TA data for all regions seems to show up in CloudWatch for us-east-1
18 | dimensions = [
19 | "Region",
20 | "ServiceName",
21 | "ServiceLimit"
22 | ]
23 |
24 | metrics = [
25 | {
26 | name = "ServiceLimitUsage"
27 | alias = "aws.trustedadvisor.serviceLimitUsage"
28 | // AWS documentation and unit indicate it is a percentage, but the actual values
29 | // are from 0 to 1 instead of 0 to 100.
30 | conversion = "max,percent"
31 | }
32 | ]
33 | }
34 | }
35 | }
--------------------------------------------------------------------------------
/atlas-slotting/src/test/resources/atlas_app-main-all-v001.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "atlas_app-main-all-v001",
3 | "cluster": "atlas_app-main-all",
4 | "createdTime": 1552023610994,
5 | "desiredCapacity": 3,
6 | "maxSize": 6,
7 | "minSize": 0,
8 | "instances": [
9 | {
10 | "availabilityZone": "us-west-2b",
11 | "imageId": "ami-001",
12 | "instanceId": "i-001",
13 | "instanceType": "r4.large",
14 | "launchTime": 1552023619000,
15 | "lifecycleState": "InService",
16 | "privateIpAddress": "192.168.1.1",
17 | "slot": 0
18 | },
19 | {
20 | "availabilityZone": "us-west-2a",
21 | "imageId": "ami-001",
22 | "instanceId": "i-002",
23 | "instanceType": "r4.large",
24 | "launchTime": 1552023619000,
25 | "lifecycleState": "InService",
26 | "privateIpAddress": "192.168.1.2",
27 | "slot": 1
28 | },
29 | {
30 | "availabilityZone": "us-west-2b",
31 | "imageId": "ami-001",
32 | "instanceId": "i-003",
33 | "instanceType": "r4.large",
34 | "launchTime": 1552023619000,
35 | "lifecycleState": "InService",
36 | "privateIpAddress": "192.168.1.3",
37 | "slot": 2
38 | }
39 | ]
40 | }
--------------------------------------------------------------------------------
/atlas-slotting/src/test/resources/atlas_app-main-all-v002.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "atlas_app-main-all-v002",
3 | "cluster": "atlas_app-main-all",
4 | "createdTime": 1552023610994,
5 | "desiredCapacity": 3,
6 | "maxSize": 6,
7 | "minSize": 0,
8 | "instances": [
9 | {
10 | "availabilityZone": "us-west-2b",
11 | "imageId": "ami-001",
12 | "instanceId": "i-101",
13 | "instanceType": "r4.large",
14 | "launchTime": 1552023619000,
15 | "lifecycleState": "InService",
16 | "privateIpAddress": "192.168.2.1",
17 | "slot": 0
18 | },
19 | {
20 | "availabilityZone": "us-west-2a",
21 | "imageId": "ami-001",
22 | "instanceId": "i-102",
23 | "instanceType": "r4.large",
24 | "launchTime": 1552023619000,
25 | "lifecycleState": "InService",
26 | "privateIpAddress": "192.168.2.2",
27 | "slot": 1
28 | },
29 | {
30 | "availabilityZone": "us-west-2b",
31 | "imageId": "ami-001",
32 | "instanceId": "i-103",
33 | "instanceType": "r4.large",
34 | "launchTime": 1552023619000,
35 | "lifecycleState": "InService",
36 | "privateIpAddress": "192.168.2.3",
37 | "slot": 2
38 | }
39 | ]
40 | }
--------------------------------------------------------------------------------
/atlas-slotting/src/test/resources/atlas_app-main-none-v001.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "atlas_app-main-none-v001",
3 | "cluster": "atlas_app-main-none",
4 | "createdTime": 1552023610994,
5 | "desiredCapacity": 3,
6 | "maxSize": 6,
7 | "minSize": 0,
8 | "instances": [
9 | {
10 | "availabilityZone": "us-west-2b",
11 | "imageId": "ami-001",
12 | "instanceId": "i-003",
13 | "instanceType": "r4.large",
14 | "launchTime": 1552023619000,
15 | "lifecycleState": "InService",
16 | "privateIpAddress": "192.168.1.3",
17 | "slot": 0
18 | },
19 | {
20 | "availabilityZone": "us-west-2a",
21 | "imageId": "ami-001",
22 | "instanceId": "i-004",
23 | "instanceType": "r4.large",
24 | "launchTime": 1552023619000,
25 | "lifecycleState": "InService",
26 | "privateIpAddress": "192.168.1.4",
27 | "slot": 1
28 | },
29 | {
30 | "availabilityZone": "us-west-2b",
31 | "imageId": "ami-001",
32 | "instanceId": "i-005",
33 | "instanceType": "r4.large",
34 | "launchTime": 1552023619000,
35 | "lifecycleState": "InService",
36 | "privateIpAddress": "192.168.1.5",
37 | "slot": 2
38 | }
39 | ]
40 | }
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/DruidSort.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.druid
17 |
/** Marker trait for the sort orders supported by Druid queries. */
trait DruidSort

object DruidSort {

  // The `type` value on each case object matches the string Druid expects in
  // query JSON; do not rename these values.

  /** Sort by lexicographic (code point) order. */
  case object Lexicographic extends DruidSort {
    val `type`: String = "lexicographic"
  }

  /** Sort treating embedded numbers numerically within strings. */
  case object Alphanumeric extends DruidSort {
    val `type`: String = "alphanumeric"
  }

  /** Sort by numeric value. */
  case object Numeric extends DruidSort {
    val `type`: String = "numeric"
  }

  /** Sort by string length. */
  case object Strlen extends DruidSort {
    val `type`: String = "strlen"
  }
}
38 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/ReduceStepTimeSeq.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.druid
17 |
18 | import com.netflix.atlas.core.model.DsType
19 | import com.netflix.atlas.core.model.TimeSeq
20 |
/**
 * View of a time sequence at a smaller step size. Every reduced-step timestamp
 * within an original-step interval maps to the underlying value reported at the
 * end boundary of that interval.
 *
 * @param ts   underlying sequence with the larger step size
 * @param step reduced step size; must evenly divide `ts.step`
 */
class ReduceStepTimeSeq(ts: TimeSeq, val step: Long) extends TimeSeq {

  require(ts.step % step == 0, "original step must be a multiple of reduced step")

  def dsType: DsType = ts.dsType

  def apply(timestamp: Long): Double = {
    // Align down to the start of the enclosing original-step interval, then
    // read the value reported at the end boundary of that interval.
    val intervalStart = (timestamp / ts.step) * ts.step
    ts(intervalStart + ts.step)
  }
}
35 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 | atlas {
2 | pekko {
3 | api-endpoints = [
4 | "com.netflix.atlas.pekko.ConfigApi",
5 | "com.netflix.atlas.pekko.HealthcheckApi",
6 | "com.netflix.atlas.persistence.PersistenceApi"
7 | ]
8 | }
9 |
10 | persistence {
11 | queue-size = 2000
12 | local-file {
13 | data-dir = "./out"
14 | max-records = 100000
15 | max-duration = 5m
16 | max-late-duration = 5m
17 | // Compression codec, can be: "null", deflate, snappy
18 | avro-codec = "deflate"
19 | // Compression level specifically for deflate
20 | avro-deflate-compressionLevel = 6
21 |       // avro buffer size (before compression), default is 64k, suggested values are between 2K and 2M
22 | avro-syncInterval = 1024000
23 | }
24 |
25 | s3 {
26 | bucket = my-bucket
27 | region = my-region
28 | prefix = my-prefix
29 | cleanup-timeout = 1h
30 | // A ".tmp" file not modified for this duration will be marked as complete (renamed), and it
31 | // MUST be longer than local-file.max-duration to avoid conflict
32 | max-inactive-duration = 7m
33 | client-timeout = 5m
34 | thread-pool-size = 50
35 | }
36 | }
37 | }
38 |
39 | include "custom.conf"
40 |
--------------------------------------------------------------------------------
/.github/workflows/snapshot.yml:
--------------------------------------------------------------------------------
# Publishes snapshot artifacts on pushes to main and maintenance branches.
name: Snapshot

on:
  push:
    branches:
      - main
      - v[0-9]+.[0-9]+.x

jobs:
  build:
    # Only run on the upstream repository, not on forks
    if: ${{ github.repository == 'Netflix-Skunkworks/iep-apps' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Set up JDK
        uses: actions/setup-java@v5
        with:
          java-version: 25
          distribution: 'zulu'
          cache: 'sbt'
      - name: SetupPGP
        # Strip the PEM armor/whitespace and import the signing key into gpg
        run: echo $ORG_SIGNING_KEY | sed -r 's/-----[^-]+-----//g;s/\n//g;s/ //g' | base64 --decode | gpg --batch --import
        env:
          ORG_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }}
      - name: Build
        env:
          NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }}
          NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }}
          NETFLIX_OSS_SONATYPE_USERNAME: ${{ secrets.ORG_SONATYPE_USERNAME }}
          NETFLIX_OSS_SONATYPE_PASSWORD: ${{ secrets.ORG_SONATYPE_PASSWORD }}
          PGP_PASSPHRASE: ${{ secrets.ORG_SIGNING_PASSWORD }}
        # Fetch tags for versioning; stdin from /dev/null avoids interactive prompts
        run: |
          git fetch --unshallow --tags
          cat /dev/null | project/sbt clean test +publishSigned
35 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/Tagger.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.netflix.atlas.core.model.Datapoint
19 | import software.amazon.awssdk.services.cloudwatch.model.Dimension
20 |
/**
 * Maps CloudWatch dimension data into tag maps suitable for publishing to
 * Atlas.
 */
trait Tagger {

  /**
   * Converts a list of cloudwatch dimensions into a tag map that can be used
   * for Atlas.
   */
  def apply(dimensions: List[Dimension]): Map[String, String]

  /**
   * Applies the approved character set to the given data point in preparation for publishing.
   */
  def fixTags(d: Datapoint): Datapoint

}
35 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/s3-replication.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // http://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
6 | s3-replication = {
7 | namespace = "AWS/S3"
8 | period = 1m
9 |       # Note that this data repeatedly posts in batches of timestamps, so using the max
10 |       # stale age for the offset is not a good idea. Use the average instead and pad it a little.
11 | end-period-offset = 15
12 |
13 | dimensions = [
14 | "SourceBucket",
15 | "DestinationBucket",
16 | "RuleId"
17 | ]
18 |
19 | metrics = [
20 | {
21 | name = "ReplicationLatency"
22 | alias = "aws.s3.replicationLatency"
23 | conversion = "max"
24 | },
25 | {
26 | name = "BytesPendingReplication"
27 | alias = "aws.s3.bytesPendingReplication"
28 | conversion = "max"
29 | },
30 | {
31 | name = "OperationsPendingReplication"
32 | alias = "aws.s3.operationsPendingReplication"
33 | conversion = "max"
34 | },
35 | {
36 | name = "OperationsFailedReplication"
37 | alias = "aws.s3.operationsFailedReplication"
38 | conversion = "sum,rate"
39 | }
40 | ]
41 | }
42 | }
43 | }
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/sns.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // http://docs.aws.amazon.com/sns/latest/dg/MonitorSNSwithCloudWatch.html
6 | sns = {
7 | namespace = "AWS/SNS"
8 | period = 1m
9 | end-period-offset = 10
10 |
11 | dimensions = [
12 | "TopicName"
13 | ]
14 |
15 | metrics = [
16 | {
17 | name = "NumberOfMessagesPublished"
18 | alias = "aws.sns.messagesPublished"
19 | conversion = "sum,rate"
20 | },
21 | {
22 | name = "NumberOfNotificationsDelivered"
23 | alias = "aws.sns.notifications"
24 | conversion = "sum,rate"
25 | tags = [
26 | {
27 | key = "id"
28 | value = "delivered"
29 | }
30 | ]
31 | },
32 | {
33 | name = "NumberOfNotificationsFailed"
34 | alias = "aws.sns.notifications"
35 | conversion = "sum,rate"
36 | tags = [
37 | {
38 | key = "id"
39 | value = "failed"
40 | }
41 | ]
42 | },
43 | {
44 | name = "PublishSize"
45 | alias = "aws.sns.messageSize"
46 | conversion = "dist-summary"
47 | }
48 | ]
49 | }
50 | }
51 | }
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/ExpressionDetailsDaoTestImpl.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import com.netflix.iep.lwc.fwd.cw.ExpressionId
19 |
/**
 * No-op DAO implementation for tests: writes are ignored, reads return empty
 * results, and every expression is reported as purge eligible.
 */
class ExpressionDetailsDaoTestImpl extends ExpressionDetailsDao {

  override def save(exprDetails: ExpressionDetails): Unit = {}
  override def read(id: ExpressionId): Option[ExpressionDetails] = None
  override def scan(): List[ExpressionId] = Nil
  override def queryPurgeEligible(now: Long, events: List[String]): List[ExpressionId] = Nil
  override def delete(id: ExpressionId): Unit = {}
  override def isPurgeEligible(ed: ExpressionDetails, now: Long): Boolean = true
}
29 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
# Publishes release artifacts when a version tag (optionally a release
# candidate) is pushed.
name: Release

on:
  push:
    tags:
      - v[0-9]+.[0-9]+.[0-9]+
      - v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+

jobs:
  build:
    # Only run on the upstream repository, not on forks
    if: ${{ github.repository == 'Netflix-Skunkworks/iep-apps' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Set up JDK
        uses: actions/setup-java@v5
        with:
          java-version: 25
          distribution: 'zulu'
          cache: 'sbt'
      - name: SetupPGP
        # Strip the PEM armor/whitespace and import the signing key into gpg
        run: echo $ORG_SIGNING_KEY | sed -r 's/-----[^-]+-----//g;s/\n//g;s/ //g' | base64 --decode | gpg --batch --import
        env:
          ORG_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }}
      - name: Build
        env:
          NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }}
          NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }}
          NETFLIX_OSS_SONATYPE_USERNAME: ${{ secrets.ORG_SONATYPE_USERNAME }}
          NETFLIX_OSS_SONATYPE_PASSWORD: ${{ secrets.ORG_SONATYPE_PASSWORD }}
          PGP_PASSPHRASE: ${{ secrets.ORG_SIGNING_PASSWORD }}
        # Publish signed artifacts, then trigger the sonatype release
        run: |
          git fetch --unshallow --tags
          cat /dev/null | project/sbt clean test +publishSigned
          cat /dev/null | project/sbt sonaRelease
36 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/DruidMetadataService.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.druid
17 |
18 | import com.netflix.iep.service.AbstractService
19 |
20 | /**
21 | * Used to indicate the service is not healthy until the metadata has been updated
22 | * at least once.
23 | */
/**
 * Used to indicate the service is not healthy until the metadata has been updated
 * at least once.
 */
class DruidMetadataService extends AbstractService {

  // Flipped once the first metadata refresh completes; volatile so the write is
  // visible to health-check threads.
  @volatile private var metadataAvailable: Boolean = false

  /** Marks the service as ready; invoked after a metadata refresh. */
  def metadataRefreshed(): Unit = {
    metadataAvailable = true
  }

  /** Healthy only if the base service is healthy and metadata has been loaded. */
  override def isHealthy: Boolean = super.isHealthy && metadataAvailable

  override def startImpl(): Unit = {}

  override def stopImpl(): Unit = {}
}
40 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch-model/src/main/scala/com/netflix/iep/lwc/fwd/cw/Report.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.cw
17 |
/**
 * Status report for a forwarded metric.
 *
 * @param timestamp timestamp associated with this report
 * @param id        expression the report applies to
 * @param metric    info for the forwarded metric, if available
 * @param error     failure that occurred, if any
 */
case class Report(
  timestamp: Long,
  id: ExpressionId,
  metric: Option[FwdMetricInfo],
  error: Option[Throwable]
) {

  /** Returns the metric info with this report's timestamp filled in. */
  def metricWithTimestamp(): Option[FwdMetricInfo] = {
    metric.map(m => m.copy(timestamp = Some(timestamp)))
  }
}
29 |
/**
 * Identifying details for a metric that was forwarded.
 */
case class FwdMetricInfo(
  region: String,
  account: String,
  name: String,
  dimensions: Map[String, String],
  timestamp: Option[Long] = None
) {

  /** Compares all fields other than the timestamp. */
  def equalsIgnoreTimestamp(that: FwdMetricInfo): Boolean = {
    val a = this.copy(timestamp = None)
    val b = that.copy(timestamp = None)
    a == b
  }

}
43 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/AwsAccountSupplier.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import software.amazon.awssdk.regions.Region
19 |
20 | /**
21 | * Interface for supplying the list of accounts, regions and namespaces to poll for CloudWatch metrics.
22 | */
23 | trait AwsAccountSupplier {
24 |
25 | /**
26 | * The map of accounts to regions to namespaces for polling. The final set is the namespace list in
27 | * the format of CloudWatch, e.g. "AWS/EC2" or "AWS/ECS".
28 | * @return
29 | * The non-null map of account IDs to poll for CloudWatch metrics along with the regions they
30 | * operate in and namespaces to poll.
31 | */
32 | def accounts: Map[String, Map[Region, Set[String]]]
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/atlas-persistence/src/test/scala/com/netflix/atlas/persistence/S3CopySinkSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
18 | import munit.FunSuite
19 |
class S3CopySinkSuite extends FunSuite {

  test("extractMinuteRange") {
    // ".tmp" files have no embedded hhmm-hhmm range and map to "61-61"
    assertEquals(S3CopySink.extractMinuteRange("abc.tmp"), "61-61")
    assertEquals(S3CopySink.extractMinuteRange("abc.1200-1300"), "20-21")
    assertEquals(S3CopySink.extractMinuteRange("abc.0000-0123"), "00-02")
  }

  test("extractMinuteRange - handles short file names and non-tmp, non-range files") {
    // NOTE(review): "abc.data" is not actually short, it is a suffix that lacks a
    // valid hhmm-hhmm range; current behavior is to fail with an index error.
    intercept[StringIndexOutOfBoundsException] {
      S3CopySink.extractMinuteRange("abc.data")
    }
  }
}
35 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | iep.lwc.cloudwatch {
3 | uri = "http://localhost:7102/api/v2/sync/cloudwatch-forwarding/clusters"
4 | namespace = "NFLX/EPIC"
5 |
6 | // Filter applied to URIs in the config, this is typically used to restrict a given instance
7 | // to a subset of the configuration
8 | filter = ".*"
9 |
10 | admin-uri = "http://localhost:7103/api/v1/cw/report"
11 |
12 | // Should it actually try to perform the put call to CloudWatch? This can be used to disable
13 | // for purposes of debugging.
14 | put-enabled = true
15 | }
16 |
17 | atlas {
18 | pekko {
19 | api-endpoints = [
20 | "com.netflix.atlas.pekko.ConfigApi",
21 | "com.netflix.atlas.pekko.HealthcheckApi",
22 | "com.netflix.iep.lwc.StatsApi"
23 | ]
24 | }
25 | }
26 |
27 | netflix.iep.aws {
28 | // Configs for clients named by region, used to allow publishing across
29 | // regions
30 | ap-south-1 {
31 | region = "ap-south-1"
32 | }
33 | ap-southeast-2 {
34 | region = "ap-southeast-2"
35 | }
36 | eu-west-1 {
37 | region = "eu-west-1"
38 | }
39 | us-west-1 {
40 | region = "us-west-1"
41 | }
42 | us-west-2 {
43 | region = "us-west-2"
44 | }
45 | us-east-1 {
46 | region = "us-east-1"
47 | }
48 | us-east-2 {
49 | region = "us-east-2"
50 | }
51 | }
52 |
53 | // User specific configuration with settings for an internal deployment
54 | include "custom.conf"
55 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/test/scala/com/netflix/atlas/aggregator/AggrConfigSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.netflix.spectator.api.ManualClock
19 | import com.netflix.spectator.api.NoopRegistry
20 | import com.typesafe.config.ConfigFactory
21 | import munit.FunSuite
22 |
class AggrConfigSuite extends FunSuite {

  test("initial polling delay") {
    val step = 60000L
    val clock = new ManualClock()
    val config = new AggrConfig(ConfigFactory.load(), new NoopRegistry, null)
    // Check every wall-clock offset within a step: the computed delay must
    // always land within the [3s, 48s] window of the step.
    for (time <- 0L until step) {
      clock.setWallTime(time)
      val delay = config.initialPollingDelay(clock, step)
      val offsetInStep = (time + delay) % step
      assert(offsetInStep >= 3000)
      assert(offsetInStep <= 48000)
    }
  }
}
38 |
--------------------------------------------------------------------------------
/iep-lwc-loadgen/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Description
3 |
4 | Service for generating load against a service running the Atlas LWCAPI. It is primarily used
5 | for helping to test new versions of the service and the eval client with a configured set
6 | of load to ensure there are no performance regressions.
7 |
8 | ## Usage
9 |
10 | Update the `iep.lwc.loadgen.uris` config setting with a list of Atlas URIs to stream. Example:
11 |
12 | ```
13 | iep.lwc.loadgen.uris = [
14 | "http://localhost:7101/api/v1/graph?q=name,m1,:eq,:avg",
15 | "http://localhost:7101/api/v1/graph?q=name,m2,:eq,:sum",
16 | "http://localhost:7101/api/v1/graph?q=name,m3,:eq,:sum&step=10s"
17 | ]
18 | ```
19 |
20 | The default step size is 60s. It can be customized for a given URI by using the `step` query
21 | parameter.
22 |
23 | ## Clone Existing Deployment
24 |
25 | To copy the expression set from an existing LWCAPI deployment, the following script can be
26 | adapted to generate the config:
27 |
28 | ```bash
29 | #!/bin/bash
30 |
31 | LWC_HOST=localhost:7101
32 | ATLAS_HOST=localhost:7102
33 | CONF_FILE=iep-lwc-loadgen/src/main/resources/custom.conf
34 |
35 | curl -s "http://$LWC_HOST/lwc/api/v1/expressions" |
36 | jq --arg host "$ATLAS_HOST" '
37 | .expressions[] |
38 | @text "http://\($host)/api/v1/graph?q=\(.expression)&step=\(.frequency / 1000)s"' |
39 | awk '
40 | BEGIN { print "iep.lwc.loadgen.uris = ["}
41 | { print " " $0 "," }
42 | END { print "]" }
43 | ' > $CONF_FILE
44 | ```
45 |
46 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch-model/src/main/scala/com/netflix/iep/lwc/fwd/cw/ClusterConfig.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.cw
17 |
/**
 * Forwarding configuration for a cluster.
 *
 * @param email        contact address for the owner of the config
 * @param expressions  expressions to evaluate and forward
 * @param checksToSkip names of validation checks that should not be run
 */
case class ClusterConfig(
  email: String,
  expressions: List[ForwardingExpression],
  checksToSkip: List[String] = Nil
) {

  /** Returns true if the named check is in the skip list. */
  def shouldSkip(name: String): Boolean = checksToSkip.contains(name)
}

/** Expression to evaluate along with where the result should be forwarded. */
case class ForwardingExpression(
  atlasUri: String,
  account: String,
  region: Option[String],
  metricName: String,
  dimensions: List[ForwardingDimension] = Nil
) {

  require(atlasUri != null, "atlasUri cannot be null")
  require(account != null, "account cannot be null")
  require(metricName != null, "metricName cannot be null")
}

/** Name/value pair used as a dimension for a forwarded metric. */
case class ForwardingDimension(name: String, value: String)
43 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
package com.netflix.atlas

import software.amazon.awssdk.services.cloudwatch.model.Datapoint

/**
 * Helper types used in this package.
 */
package object cloudwatch {

  /** Tag map used for Atlas; keys and values are strings. */
  type Tags = Map[String, String]

  /**
   * Converts a cloudwatch datapoint to a floating point value. The conversion is
   * based on the corresponding [[MetricDefinition]]. The full metadata is passed
   * in to allow access to other information that can be useful, such as the period
   * used for reporting the data into cloudwatch.
   */
  type Conversion = (MetricMetadata, Datapoint) => Double

  /** Datapoint model used by Atlas. */
  type AtlasDatapoint = com.netflix.atlas.core.model.Datapoint

  /** Datapoint model returned by the CloudWatch API. */
  type CloudWatchDatapoint = software.amazon.awssdk.services.cloudwatch.model.Datapoint
}
39 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Map stdin to /dev/null to avoid interactive prompts if there is some failure related to the
# build script.
ifeq (${TRAVIS_SCALA_VERSION},)
SBT := cat /dev/null | project/sbt
else
SBT := cat /dev/null | project/sbt ++${TRAVIS_SCALA_VERSION}
endif

.PHONY: build snapshot release clean coverage format

build:
	$(SBT) clean test checkLicenseHeaders scalafmtCheckAll

# NOTE(review): the snapshot/release targets below still publish via bintray, while
# the GitHub workflows publish via Sonatype — confirm whether these targets are
# still in use.
snapshot:
	# Travis uses a depth when fetching git data so the tags needed for versioning may not
	# be available unless we explicitly fetch them
	git fetch --unshallow --tags
	$(SBT) storeBintrayCredentials
	$(SBT) clean test checkLicenseHeaders publish

release:
	# Travis uses a depth when fetching git data so the tags needed for versioning may not
	# be available unless we explicitly fetch them
	git fetch --unshallow --tags

	# Storing the bintray credentials needs to be done as a separate command so they will
	# be available early enough for the publish task.
	#
	# The storeBintrayCredentials still needs to be on the subsequent command or we get:
	# [error] (iep-service/*:bintrayEnsureCredentials) java.util.NoSuchElementException: None.get
	$(SBT) storeBintrayCredentials
	$(SBT) clean test checkLicenseHeaders storeBintrayCredentials publish bintrayRelease

clean:
	$(SBT) clean

coverage:
	$(SBT) clean coverage test coverageReport
	$(SBT) coverageAggregate

format:
	$(SBT) formatLicenseHeaders scalafmtAll
43 |
44 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/FirehoseMetric.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.netflix.atlas.util.XXHasher
19 | import software.amazon.awssdk.services.cloudwatch.model.Datapoint
20 | import software.amazon.awssdk.services.cloudwatch.model.Dimension
21 |
22 | /**
23 | * Container to hold a metric parsed from a Firehose Cloud Watch metric.
24 | */
/**
 * Container to hold a metric parsed from a Firehose Cloud Watch metric.
 */
case class FirehoseMetric(
  metricStreamName: String,
  namespace: String,
  metricName: String,
  dimensions: List[Dimension],
  datapoint: Datapoint
) {

  /**
   * Hash over the namespace, metric name and dimensions. Dimensions are sorted
   * by name first so the result is stable regardless of their order. The stream
   * name and datapoint are not part of the hash.
   */
  def xxHash: Long = {
    val base = XXHasher.updateHash(XXHasher.hash(namespace), metricName)
    dimensions.sortBy(_.name()).foldLeft(base) { (acc, d) =>
      XXHasher.updateHash(XXHasher.updateHash(acc, d.name()), d.value())
    }
  }
}
43 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/poller/MetricValue.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch.poller
17 |
18 | import com.netflix.spectator.api.Id
19 |
/** Base type for metric values that can be identified by a Spectator Id. */
sealed trait MetricValue {

  /** Identifier, including tags, for this metric. */
  def id: Id
}
23 |
object MetricValue {

  /**
   * Creates a Spectator Id for `name` with the remaining entries of `tags`. The
   * "name" key is removed from the tag map and any tag value longer than 120
   * characters is replaced with "VALUE_TOO_LONG".
   */
  def createId(name: String, tags: Map[String, String]): Id = {
    import scala.jdk.CollectionConverters._
    val sanitized = (tags - "name").map {
      case (key, value) =>
        val v = if (value.length > 120) "VALUE_TOO_LONG" else value
        key -> v
    }
    Id.create(name).withTags(sanitized.asJava)
  }

  /** Creates a double valued metric with just a name tag. */
  def apply(name: String, value: Double): MetricValue = {
    DoubleValue(Map("name" -> name), value)
  }
}
39 |
// Double valued metric; the "name" tag supplies the metric name for the Id.
case class DoubleValue(tags: Map[String, String], value: Double) extends MetricValue {
  def id: Id = MetricValue.createId(tags("name"), tags)
}
44 |
--------------------------------------------------------------------------------
/project/SonatypeSettings.scala:
--------------------------------------------------------------------------------
1 | import sbt._
2 | import sbt.Keys._
3 |
/**
 * Settings for publishing artifacts to Sonatype / Maven Central. Credentials are
 * read from the NETFLIX_OSS_SONATYPE_* environment variables.
 */
object SonatypeSettings {

  /** Reads a Sonatype setting from the environment, e.g. NETFLIX_OSS_SONATYPE_USERNAME. */
  private def get(k: String): String = {
    sys.env.getOrElse(s"NETFLIX_OSS_SONATYPE_$k", s"missing$k")
  }

  private lazy val user = get("USERNAME")
  private lazy val pass = get("PASSWORD")

  lazy val settings: Seq[Def.Setting[_]] = Seq(
    organization := "com.netflix.iep-apps",
    organizationName := "netflix",
    organizationHomepage := Some(url("https://github.com/Netflix")),
    homepage := Some(url("https://github.com/Netflix-Skunkworks/iep-apps")),
    // Was "In-memory time series database", which describes Atlas rather than
    // this repository of IEP-based applications.
    description := "Suite of applications built on the Insight Engineering Platform",

    scmInfo := Some(
      ScmInfo(
        url("https://github.com/Netflix-Skunkworks/iep-apps"),
        "scm:git@github.com:Netflix-Skunkworks/iep-apps.git"
      )
    ),

    developers := List(
      Developer(
        id = "netflixgithub",
        name = "Netflix Open Source Development",
        email = "netflixoss@netflix.com",
        url = url("https://github.com/Netflix")
      )
    ),

    publishMavenStyle := true,
    pomIncludeRepository := { _ => false },

    licenses += ("Apache 2" -> url("https://www.apache.org/licenses/LICENSE-2.0.txt")),
    credentials += Credentials("Sonatype Nexus Repository Manager", "central.sonatype.com", user, pass),

    // Snapshots go directly to the snapshot repo; releases are staged locally
    // and pushed via the sonatype release task.
    publishTo := {
      if (isSnapshot.value)
        Some(Resolver.sonatypeCentralSnapshots)
      else
        localStaging.value
    }
  )
}
50 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.druid
17 |
18 | import com.netflix.atlas.pekko.AccessLogger
19 | import com.typesafe.config.Config
20 | import com.typesafe.config.ConfigFactory
21 | import org.apache.pekko.actor.ActorSystem
22 |
23 | import java.util.Optional
24 | import org.apache.pekko.http.scaladsl.Http
25 | import org.springframework.context.annotation.Bean
26 | import org.springframework.context.annotation.Configuration
27 |
/**
 * Spring configuration wiring the beans used by the Druid service.
 */
@Configuration
class AppConfiguration {

  /** Service tracking metadata about Druid datasources. */
  @Bean
  def metadataService: DruidMetadataService = new DruidMetadataService

  /**
   * Client for issuing queries to Druid. Uses the default loaded config when no
   * config bean is present in the application context.
   */
  @Bean
  def druidClient(config: Optional[Config], system: ActorSystem): DruidClient = {
    implicit val actorSystem: ActorSystem = system
    val druidConfig = config.orElseGet(() => ConfigFactory.load()).getConfig("atlas.druid")
    new DruidClient(druidConfig, system, Http().superPool[AccessLogger]())
  }
}
43 |
--------------------------------------------------------------------------------
/iep-lwc-loadgen/src/test/scala/com/netflix/iep/loadgen/LoadGenServiceSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.loadgen
17 |
18 | import java.time.Duration
19 |
20 | import munit.FunSuite
21 |
class LoadGenServiceSuite extends FunSuite {

  // Shared shorthand so each check stays on a single line.
  private def extracted(uri: String): Option[Duration] = LoadGenService.extractStep(uri)

  test("extract step from uri") {
    assertEquals(extracted("/graph?q=name,foo,:eq&step=60s"), Some(Duration.ofSeconds(60)))
  }

  test("extract step from uri, not present") {
    assertEquals(extracted("/graph?q=name,foo,:eq"), None)
  }

  test("extract step from uri, invalid uri") {
    assertEquals(extracted("/graph?q=name,{{ .SpinnakerApp }},:eq"), None)
  }

  test("extract step from uri, invalid step") {
    assertEquals(extracted("/graph?q=name,foo,:eq&step=bad"), None)
  }
}
44 |
--------------------------------------------------------------------------------
/atlas-stream/src/main/scala/com/netflix/atlas/stream/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.stream
17 |
18 | import org.apache.pekko.actor.ActorSystem
19 | import com.netflix.atlas.eval.stream.Evaluator
20 | import com.netflix.spectator.api.NoopRegistry
21 | import com.netflix.spectator.api.Registry
22 | import com.typesafe.config.Config
23 | import com.typesafe.config.ConfigFactory
24 | import org.springframework.context.annotation.Bean
25 | import org.springframework.context.annotation.Configuration
26 |
27 | import java.util.Optional
28 |
/**
 * Spring configuration for the stream evaluation service.
 */
@Configuration
class AppConfiguration {

  /**
   * Creates the EvalService, falling back to the default config and a no-op
   * registry when the optional beans are not bound in the context.
   */
  @Bean
  def evalService(
    config: Optional[Config],
    registry: Optional[Registry],
    evaluator: Evaluator,
    system: ActorSystem
  ): EvalService = {
    val cfg = config.orElseGet(() => ConfigFactory.load())
    val reg = registry.orElseGet(() => new NoopRegistry)
    new EvalService(cfg, reg, evaluator, system)
  }
}
44 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/resources/application.conf:
--------------------------------------------------------------------------------

// Pekko/webserver settings for the forwarding admin service.
atlas.pekko {
  // Endpoint classes that provide the REST API for this service.
  api-endpoints = [
    "com.netflix.atlas.pekko.ConfigApi",
    "com.netflix.atlas.pekko.HealthcheckApi",
    "com.netflix.iep.lwc.fwd.admin.Api"
  ]

  // Append the scaling policies actor to any actors configured elsewhere.
  actors = ${?atlas.pekko.actors} [
    {
      name = "scalingPolicies"
      class = "com.netflix.iep.lwc.fwd.admin.ScalingPolicies"
    }
  ]

}

// Fixed-size thread pool used for blocking operations.
blocking-dispatcher {
  executor = "thread-pool-executor"
  thread-pool-executor {
    core-pool-size-min = 10
    core-pool-size-max = 10
  }
}

// Settings specific to the forwarding admin application.
iep.lwc.fwding-admin {
  age-limit = 10m
  queue-size-limit = 10000
  edda-cache-refresh-interval = 30m
  fwd-metric-info-purge-limit = 5d
  // Pattern URI for cloudwatch forwarding cluster configs; %s is the cluster name.
  cw-expr-uri = "http://localhost:7102/api/v2/cloudwatch-forwarding/clusters/%s"
  user = "iep_lwc_fwding_admin"
  // Maps account ids to environment names; presumably AWS account ids — confirm with consumers.
  accountEnvMapping = {
    123 = local
  }

  // The host name in the properties 'cw-alarms-uri', 'ec2-policies-uri' and 'titus-policies-uri'
  // uses the following pattern to lookup the appropriate Edda deployment.
  // "prefix-%s.%s.%s.suffix".format(account, region, env)
  cw-alarms-uri = "http://localhost:7103/api/v2/aws/alarms;namespace=NFLX/EPIC;_expand:(alarmName,metricName,dimensions:(name,value))"
  ec2-policies-uri = "http://localhost:7103/api/v2/aws/scalingPolicies;_expand:(policyName,alarms:(alarmName))"
  titus-policies-uri = "http://localhost:7103/api/v3/netflix/titusscalingPolicies;_expand:(jobId,id,scalingPolicy:(targetPolicyDescriptor:(customizedMetricSpecification)))"
}
--------------------------------------------------------------------------------
/iep-lwc-loadgen/src/main/scala/com/netflix/iep/loadgen/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.loadgen
17 |
18 | import org.apache.pekko.actor.ActorSystem
19 | import com.netflix.atlas.eval.stream.Evaluator
20 | import com.netflix.spectator.api.NoopRegistry
21 | import com.netflix.spectator.api.Registry
22 | import com.typesafe.config.Config
23 | import com.typesafe.config.ConfigFactory
24 | import org.springframework.context.annotation.Bean
25 | import org.springframework.context.annotation.Configuration
26 |
27 | import java.util.Optional
28 |
/**
 * Spring configuration for the load generation service.
 */
@Configuration
class AppConfiguration {

  /**
   * Creates the LoadGenService, falling back to the default config and a no-op
   * registry when the optional beans are not bound in the context.
   */
  @Bean
  def loadGenService(
    config: Optional[Config],
    registry: Optional[Registry],
    evaluator: Evaluator,
    system: ActorSystem
  ): LoadGenService = {
    val cfg = config.orElseGet(() => ConfigFactory.load())
    val reg = registry.orElseGet(() => new NoopRegistry)
    new LoadGenService(cfg, reg, evaluator, system)
  }
}
44 |
--------------------------------------------------------------------------------
/iep-archaius/src/main/scala/com/netflix/iep/archaius/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.archaius
17 |
18 | import com.netflix.iep.aws2.AwsClientFactory
19 | import com.netflix.spectator.api.NoopRegistry
20 | import com.netflix.spectator.api.Registry
21 | import org.springframework.context.annotation.Bean
22 | import org.springframework.context.annotation.Configuration
23 | import software.amazon.awssdk.services.dynamodb.DynamoDbClient
24 |
25 | import java.util.Optional
26 |
/**
 * Spring configuration wiring the DynamoDB-backed properties service.
 */
@Configuration
class AppConfiguration {

  /** DynamoDB client created via the shared AWS client factory. */
  @Bean
  def dynamoDbClient(factory: AwsClientFactory): DynamoDbClient =
    factory.getInstance(classOf[DynamoDbClient])

  /** Service wrapper around the DynamoDB client. */
  @Bean
  def dynamoService(client: DynamoDbClient): DynamoService =
    new DynamoService(client)

  /** Properties context; uses a no-op registry if none is bound in the context. */
  @Bean
  def propertiesContext(registry: Optional[Registry]): PropertiesContext =
    new PropertiesContext(registry.orElseGet(() => new NoopRegistry))
}
46 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/ScalingPoliciesTestImpl.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import org.apache.pekko.NotUsed
19 | import org.apache.pekko.stream.scaladsl.Flow
20 | import com.typesafe.config.Config
21 |
22 | import scala.concurrent.ExecutionContext
23 |
/**
 * Test variant of ScalingPolicies that pre-seeds the cached policies and
 * disables the periodic refresh so tests have full control over state.
 */
class ScalingPoliciesTestImpl(
  config: Config,
  dao: ScalingPoliciesDao,
  policies: Map[EddaEndpoint, List[ScalingPolicy]] = Map.empty
) extends ScalingPolicies(config, dao) {

  scalingPolicies = policies

  // No-op: tests decide when a refresh happens.
  override def startPeriodicTimer(): Unit = {}
}
33 |
/**
 * DAO stub that serves scaling policies from the supplied in-memory map.
 */
class ScalingPoliciesDaoTestImpl(
  policies: Map[EddaEndpoint, List[ScalingPolicy]]
) extends ScalingPoliciesDao {

  protected implicit val ec: ExecutionContext = ExecutionContext.global

  override def getScalingPolicies: Flow[EddaEndpoint, List[ScalingPolicy], NotUsed] =
    Flow[EddaEndpoint].map(endpoint => policies(endpoint))
}
45 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/scala/com/netflix/iep/lwc/fwd/admin/Timer.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import java.util.concurrent.TimeUnit
19 |
20 | import com.netflix.spectator.api.Clock
21 | import com.netflix.spectator.api.Registry
22 |
object Timer {

  /**
   * Run `f` and pass the elapsed monotonic time to `record`. The time is
   * recorded whether or not `f` succeeds; if it throws an exception, the tags
   * include the exception class name and the exception is rethrown.
   */
  def measure[T](
    f: => T,
    name: String,
    clock: Clock,
    record: (String, List[String], Long) => Unit
  ): T = {
    val start = clock.monotonicTime()
    var errorTags = List.empty[String]
    try {
      f
    } catch {
      case e: Exception =>
        errorTags = List("exception", e.getClass.getSimpleName)
        throw e
    } finally {
      record(name, errorTags, clock.monotonicTime() - start)
    }
  }

  /** Measure `f` and record the elapsed time on a registry timer named `name`. */
  def record[T](f: => T, name: String, registry: Registry): T = {
    measure(
      f,
      name,
      registry.clock(),
      (n, t, d) => registry.timer(n, t*).record(d, TimeUnit.NANOSECONDS)
    )
  }

}
54 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/util/XXHasher.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.util
17 |
18 | import net.openhft.hashing.LongHashFunction
19 |
20 | /**
21 | * Simple wrapper and utility functions for hashing with the OpenHFT XXHash implementation.
22 | */
/**
 * Simple wrapper and utility functions for hashing with the OpenHFT XXHash implementation.
 */
object XXHasher {

  private val xxHash = LongHashFunction.xx

  /** Hash the full byte array. */
  def hash(value: Array[Byte]): Long = xxHash.hashBytes(value)

  /** Hash `length` bytes of the array starting at `offset`. */
  def hash(value: Array[Byte], offset: Int, length: Int): Long =
    xxHash.hashBytes(value, offset, length)

  /** Hash the characters of the string. */
  def hash(value: String): Long = xxHash.hashChars(value)

  /** Fold the hash of the byte array into an existing hash value. */
  def updateHash(hash: Long, value: Array[Byte]): Long =
    combineHashes(hash, xxHash.hashBytes(value))

  /** Fold the hash of a slice of the byte array into an existing hash value. */
  def updateHash(hash: Long, value: Array[Byte], offset: Int, length: Int): Long =
    combineHashes(hash, xxHash.hashBytes(value, offset, length))

  /** Fold the hash of the string into an existing hash value. */
  def updateHash(hash: Long, value: String): Long =
    combineHashes(hash, xxHash.hashChars(value))

  /** Mix two hash values into one. Note: the mix is order dependent. */
  def combineHashes(hash_a: Long, hash_b: Long): Long = 2251 * hash_a ^ 37 * hash_b
}
43 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/scala/com/netflix/atlas/persistence/FileUtil.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
18 | import java.io.File
19 | import java.nio.file.Files
20 |
21 | import com.typesafe.scalalogging.StrictLogging
22 |
23 | import scala.jdk.StreamConverters.*
24 | import scala.util.Using
25 |
object FileUtil extends StrictLogging {

  /** Delete the file, logging rather than propagating any failure. */
  def delete(f: File): Unit = {
    try {
      Files.delete(f.toPath)
      logger.debug(s"deleted file $f")
    } catch {
      case e: Exception =>
        logger.error(s"failed to delete path $f", e)
    }
  }

  /** List directory entries, returning Nil and logging if the listing fails. */
  def listFiles(f: File): List[File] = {
    try {
      Using.resource(Files.list(f.toPath)) { stream =>
        stream.toScala(List).map(_.toFile)
      }
    } catch {
      case e: Exception =>
        logger.error(s"failed to list files for: $f", e)
        Nil
    }
  }

  /** True if the file name carries the rolling writer's temp suffix. */
  def isTmpFile(f: File): Boolean = isTmpFile(f.getName)

  /** True if the name carries the rolling writer's temp suffix. */
  def isTmpFile(s: String): Boolean = s.endsWith(RollingFileWriter.TmpFileSuffix)
}
57 |
--------------------------------------------------------------------------------
/iep-lwc-bridge/src/main/scala/com/netflix/iep/lwc/StatsApi.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc
17 |
18 | import org.apache.pekko.http.scaladsl.model.HttpEntity
19 | import org.apache.pekko.http.scaladsl.model.HttpResponse
20 | import org.apache.pekko.http.scaladsl.model.MediaTypes
21 | import org.apache.pekko.http.scaladsl.model.StatusCodes
22 | import org.apache.pekko.http.scaladsl.server.Directives.*
23 | import org.apache.pekko.http.scaladsl.server.Route
24 | import com.netflix.atlas.pekko.CustomDirectives.*
25 | import com.netflix.atlas.pekko.WebApi
26 | import com.netflix.atlas.json.Json
27 |
28 | /**
29 | * Dump the stats for the expressions flowing through the bridge.
30 | */
/**
 * Dump the stats for the expressions flowing through the bridge.
 */
class StatsApi(evaluator: ExpressionsEvaluator) extends WebApi {

  override def routes: Route = {
    endpointPathPrefix("api" / "v1" / "stats") {
      get {
        // Encoded inside the directive block so stats are fetched per request.
        val json = Json.encode(evaluator.stats)
        val payload = HttpEntity(MediaTypes.`application/json`, json)
        complete(HttpResponse(StatusCodes.OK, Nil, payload))
      }
    }
  }
}
44 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/CwForwardingConfigSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import org.apache.pekko.actor.ActorSystem
19 | import com.netflix.atlas.eval.stream.Evaluator
20 | import com.netflix.spectator.api.NoopRegistry
21 | import com.typesafe.config.ConfigFactory
22 | import com.typesafe.scalalogging.StrictLogging
23 | import munit.FunSuite
24 |
class CwForwardingConfigSuite extends FunSuite with CwForwardingTestConfig with StrictLogging {

  private val config = ConfigFactory.load()
  private val system = ActorSystem()

  // Validations wired against the default loaded configuration.
  val validations = new CwExprValidations(
    new ExprInterpreter(config),
    new Evaluator(config, new NoopRegistry(), system)
  )

  test("Skip the given checks") {
    val cfg = makeConfig(checksToSkip = List("DefaultDimension"))
    assert(cfg.shouldSkip("DefaultDimension"))
  }

  test("Do checks that are not flagged to skip") {
    val cfg = makeConfig(checksToSkip = List("DefaultDimension"))
    assert(!cfg.shouldSkip("SingleExpression"))
  }

}
46 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/scala/com/netflix/iep/lwc/fwd/admin/SchemaValidation.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import com.fasterxml.jackson.databind.JsonNode
19 | import com.github.fge.jsonschema.main.JsonSchema
20 | import com.github.fge.jsonschema.main.JsonSchemaFactory
21 | import com.netflix.atlas.json.Json
22 | import com.typesafe.scalalogging.StrictLogging
23 |
24 | import scala.io.Source
25 | import scala.jdk.CollectionConverters.*
26 |
class SchemaValidation extends StrictLogging {

  /**
   * JSON schema loaded once from the bundled `cw-fwding-cfg-schema.json`
   * resource. The reader is always closed, even if decoding fails.
   */
  val schema: JsonSchema = {
    val reader = Source.fromResource("cw-fwding-cfg-schema.json").reader()
    try {
      val schemaNode = Json.decode[SchemaCfg](reader).schema
      JsonSchemaFactory.byDefault().getJsonSchema(schemaNode)
    } finally {
      reader.close()
    }
  }

  /**
   * Validate the node against the schema, throwing IllegalArgumentException
   * with all failure messages (newline separated) when it does not conform.
   */
  def validate(json: JsonNode): Unit = {
    val report = schema.validate(json)
    if (!report.isSuccess) {
      val messages = report.asScala.map(_.getMessage).mkString("\n")
      throw new IllegalArgumentException(messages)
    }
  }

}
50 |
51 | case class SchemaCfg(schema: JsonNode, validationHook: String)
52 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/scala/com/netflix/iep/lwc/fwd/admin/ExprInterpreter.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import org.apache.pekko.http.scaladsl.model.Uri
19 | import com.netflix.atlas.core.model.CustomVocabulary
20 | import com.netflix.atlas.core.model.ModelExtractors
21 | import com.netflix.atlas.core.model.StyleExpr
22 | import com.netflix.atlas.core.stacklang.Interpreter
23 | import com.typesafe.config.Config
24 |
class ExprInterpreter(config: Config) {

  // Stack language interpreter using the custom vocabulary for this config.
  private val interpreter = Interpreter(new CustomVocabulary(config).allWords)

  /** Evaluate the `q` parameter of the given Atlas URI string. */
  def eval(atlasUri: String): List[StyleExpr] = eval(Uri(atlasUri))

  /** Evaluate the `q` parameter of the URI, failing if it is absent. */
  def eval(uri: Uri): List[StyleExpr] = {
    uri.query().get("q") match {
      case Some(q) => doEval(q)
      case None =>
        throw new IllegalArgumentException(
          s"missing required URI parameter `q`: $uri"
        )
    }
  }

  /** Evaluate the expression, requiring every stack item to be a presentation type. */
  def doEval(expr: String): List[StyleExpr] = {
    interpreter.execute(expr).stack.map {
      case ModelExtractors.PresentationType(t) => t
      case v                                   => throw new MatchError(v)
    }
  }
}
50 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/scala/com/netflix/atlas/persistence/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
18 | import com.netflix.iep.aws2.AwsClientFactory
19 | import org.apache.pekko.actor.ActorSystem
20 | import com.netflix.spectator.api.NoopRegistry
21 | import com.netflix.spectator.api.Registry
22 | import com.typesafe.config.Config
23 | import com.typesafe.config.ConfigFactory
24 | import org.springframework.context.annotation.Bean
25 | import org.springframework.context.annotation.Configuration
26 |
27 | import java.util.Optional
28 |
/**
 * Spring configuration for the persistence services.
 */
@Configuration
class AppConfiguration {

  /**
   * Creates the S3 copy service, falling back to the default config and a
   * no-op registry when the optional beans are not bound in the context.
   */
  @Bean
  def s3CopyService(
    awsFactory: AwsClientFactory,
    config: Optional[Config],
    registry: Optional[Registry],
    system: ActorSystem
  ): S3CopyService = {
    val cfg = config.orElseGet(() => ConfigFactory.load())
    val reg = registry.orElseGet(() => new NoopRegistry)
    new S3CopyService(awsFactory, cfg, reg, system)
  }

  /** Local persistence service sharing the copy service's config and registry. */
  @Bean
  def localFilePersistService(service: S3CopyService): LocalFilePersistService =
    new LocalFilePersistService(service.config, service.registry, service, service.system)
}
49 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch/src/main/scala/com/netflix/iep/lwc/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc
17 |
18 | import org.apache.pekko.actor.ActorSystem
19 | import com.netflix.atlas.eval.stream.Evaluator
20 | import com.netflix.iep.aws2.AwsClientFactory
21 | import com.netflix.spectator.api.NoopRegistry
22 | import com.netflix.spectator.api.Registry
23 | import com.typesafe.config.Config
24 | import com.typesafe.config.ConfigFactory
25 | import org.springframework.context.annotation.Bean
26 | import org.springframework.context.annotation.Configuration
27 |
28 | import java.util.Optional
29 |
/**
 * Spring configuration for the cloudwatch forwarding service.
 */
@Configuration
class AppConfiguration {

  /** Tracks stats about the forwarding configs. */
  @Bean
  def configStats: ConfigStats = new ConfigStats

  /**
   * Creates the forwarding service, falling back to the default config and a
   * no-op registry when the optional beans are not bound in the context.
   */
  @Bean
  def forwardingService(
    config: Optional[Config],
    registry: Optional[Registry],
    configStats: ConfigStats,
    evaluator: Evaluator,
    factory: AwsClientFactory,
    system: ActorSystem
  ): ForwardingService = {
    val cfg = config.orElseGet(() => ConfigFactory.load())
    val reg = registry.orElseGet(() => new NoopRegistry)
    new ForwardingService(cfg, reg, configStats, evaluator, factory, system)
  }
}
52 |
--------------------------------------------------------------------------------
/atlas-slotting/src/main/scala/com/netflix/atlas/slotting/Util.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.slotting
17 |
18 | import java.time.Duration
19 | import java.util.concurrent.ScheduledFuture
20 |
21 | import com.netflix.iep.config.NetflixEnvironment
22 | import com.netflix.spectator.api.Registry
23 | import com.netflix.spectator.impl.Scheduler
24 | import com.typesafe.config.Config
25 | import com.typesafe.scalalogging.StrictLogging
26 |
object Util extends StrictLogging {

  /**
   * Read a long from `basePath.<env>.<region>` when that path exists,
   * otherwise from `basePath.default`.
   */
  def getLongOrDefault(config: Config, basePath: String): Long = {
    val envSpecific = s"$basePath.${NetflixEnvironment.accountEnv()}.${NetflixEnvironment.region()}"
    if (config.hasPath(envSpecific))
      config.getLong(envSpecific)
    else
      config.getLong(s"$basePath.default")
  }

  /**
   * Start a two-thread scheduler invoking `fn` at the given interval, skipping
   * a run if the previous one is still in progress.
   */
  def startScheduler(
    registry: Registry,
    name: String,
    interval: Duration,
    fn: () => Unit
  ): ScheduledFuture[?] = {
    val options = new Scheduler.Options()
      .withFrequency(Scheduler.Policy.FIXED_RATE_SKIP_IF_LONG, interval)
    new Scheduler(registry, name, 2).schedule(options, () => fn())
  }

}
52 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/scala/com/netflix/atlas/aggregator/AtlasAggregatorService.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.netflix.iep.service.AbstractService
19 | import com.netflix.spectator.api.Clock
20 | import com.netflix.spectator.api.Id
21 | import com.netflix.spectator.api.Registry
22 | import com.netflix.spectator.atlas.AtlasRegistry
23 | import com.typesafe.config.Config
24 |
/**
  * Service wrapper around an aggregation `AtlasRegistry`. The registry's
  * lifecycle is bound to the service start/stop hooks.
  */
class AtlasAggregatorService(
  config: Config,
  clock: Clock,
  registry: Registry,
  client: PekkoClient
) extends AbstractService
    with Aggregator {

  private val aggrRegistry =
    new AtlasRegistry(clock, new AggrConfig(config, registry, client))

  /** Expose the aggregation registry using the base `Registry` interface. */
  def atlasRegistry: Registry = aggrRegistry

  override def startImpl(): Unit = aggrRegistry.start()

  override def stopImpl(): Unit = aggrRegistry.stop()

  /** Expose the aggregation registry with its concrete type. */
  def lookup: AtlasRegistry = aggrRegistry

  /** Add `value` to the counter identified by `id`. */
  override def add(id: Id, value: Double): Unit =
    aggrRegistry.counter(id).add(value)

  /** Update the max gauge identified by `id` with `value`. */
  override def max(id: Id, value: Double): Unit =
    aggrRegistry.maxGauge(id).set(value)
}
59 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/MetricMetadata.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import software.amazon.awssdk.services.cloudwatch.model.Datapoint
19 | import software.amazon.awssdk.services.cloudwatch.model.Dimension
20 | import software.amazon.awssdk.services.cloudwatch.model.GetMetricStatisticsRequest
21 | import software.amazon.awssdk.services.cloudwatch.model.Statistic
22 |
23 | import java.time.Instant
24 |
/**
  * Metadata for a particular metric to retrieve from CloudWatch.
  */
case class MetricMetadata(
  category: MetricCategory,
  definition: MetricDefinition,
  dimensions: List[Dimension]
) {

  /** Apply the definition's conversion function to a raw CloudWatch datapoint. */
  def convert(d: Datapoint): Double = definition.conversion(this, d)

  /**
    * Build a GetMetricStatistics request for this metric covering the span
    * from `s` to `e`, using the category's namespace and period.
    */
  def toGetRequest(s: Instant, e: Instant): GetMetricStatisticsRequest = {
    import scala.jdk.CollectionConverters.*
    val builder = GetMetricStatisticsRequest.builder()
    builder
      .metricName(definition.name)
      .namespace(category.namespace)
      .dimensions(dimensions.asJava)
      .statistics(Statistic.MAXIMUM, Statistic.MINIMUM, Statistic.SUM, Statistic.SAMPLE_COUNT)
      .period(category.period)
      .startTime(s)
      .endTime(e)
    builder.build()
  }
}
50 |
--------------------------------------------------------------------------------
/iep-archaius/src/main/scala/com/netflix/iep/archaius/DynamoService.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.archaius
17 |
18 | import java.util.concurrent.Executors
19 | import java.util.concurrent.atomic.AtomicLong
20 | import com.netflix.iep.service.AbstractService
21 | import software.amazon.awssdk.services.dynamodb.DynamoDbClient
22 |
23 | import scala.concurrent.ExecutionContext
24 | import scala.concurrent.Future
25 |
/**
 * Provides access to a dynamo client and a dedicated thread pool for executing
 * calls. Sample usage:
 *
 * ```
 * val future = dynamoService.execute { client =>
 *   client.scan(new ScanRequest().withTableName("foo"))
 * }
 * ```
 */
class DynamoService(client: DynamoDbClient) extends AbstractService {

  private val nextId = new AtomicLong()

  // Dedicated pool, sized to the host, so dynamo calls do not compete with
  // other executors. Threads are named for easier thread-dump diagnosis.
  private val pool = Executors.newFixedThreadPool(
    Runtime.getRuntime.availableProcessors(),
    (r: Runnable) => {
      new Thread(r, s"dynamo-db-${nextId.getAndIncrement()}")
    }
  )
  private val ec = ExecutionContext.fromExecutorService(pool)

  override def startImpl(): Unit = ()

  override def stopImpl(): Unit = {
    // Fix: previously the pool was never shut down, leaking threads and
    // potentially preventing a clean JVM exit. Already-submitted tasks are
    // allowed to complete.
    pool.shutdown()
  }

  /** Run `task` with the client on the dedicated thread pool. */
  def execute[T](task: DynamoDbClient => T): Future[T] = Future(task(client))(ec)
}
54 |
--------------------------------------------------------------------------------
/iep-archaius/src/test/scala/com/netflix/iep/archaius/MockDynamoDB.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.archaius
17 |
18 | import software.amazon.awssdk.services.dynamodb.DynamoDbClient
19 | import software.amazon.awssdk.services.dynamodb.model.ScanRequest
20 | import software.amazon.awssdk.services.dynamodb.model.ScanResponse
21 | import software.amazon.awssdk.services.dynamodb.paginators.ScanIterable
22 |
23 | import java.lang.reflect.InvocationHandler
24 | import java.lang.reflect.Method
25 | import java.lang.reflect.Proxy
26 |
/**
  * Reflection-proxy backed mock for `DynamoDbClient`. Only `scan` and
  * `scanPaginator` are handled; any other method throws.
  */
class MockDynamoDB extends InvocationHandler {

  // Response returned for any `scan` call; tests set this before use.
  var scanResponse: ScanResponse = _

  override def invoke(proxy: Any, method: Method, args: Array[AnyRef]): AnyRef = {
    val name = method.getName
    if (name == "scan") {
      scanResponse
    } else if (name == "scanPaginator") {
      val request = args(0).asInstanceOf[ScanRequest]
      new ScanIterable(proxy.asInstanceOf[DynamoDbClient], request)
    } else {
      throw new UnsupportedOperationException(method.toString)
    }
  }

  /** Create a DynamoDbClient whose calls are dispatched to this handler. */
  def client: DynamoDbClient = {
    val loader = Thread.currentThread().getContextClassLoader
    Proxy
      .newProxyInstance(loader, Array(classOf[DynamoDbClient]), this)
      .asInstanceOf[DynamoDbClient]
  }
}
46 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/ExprInterpreterSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import com.typesafe.config.ConfigFactory
19 | import munit.FunSuite
20 |
class ExprInterpreterSuite extends FunSuite {

  val interpreter = new ExprInterpreter(ConfigFactory.load())

  /** Build a graph API URI for the expression, dropping embedded newlines. */
  def makeAtlasUri(
    expr: String
  ): String = {
    s"""http://localhost/api/v1/graph?q=$expr""".replace("\n", "").trim
  }

  test("Should be able to parse a valid expression") {
    val uri = makeAtlasUri(
      expr = """
        |name,nodejs.cpuUsage,:eq,
        |:node-avg,
        |(,nf.account,nf.asg,),:by
      """.stripMargin
    )
    assert(interpreter.eval(uri).size == 1)
  }

  test("Should fail for invalid expression") {
    val uri = makeAtlasUri(
      expr = """
        |name,nodejs.cpuUsage,:e,
        |:node-avg,
        |(,nf.account,nf.asg,),:by
      """.stripMargin
    )
    val e = intercept[IllegalStateException](interpreter.eval(uri))
    assert(e.getMessage == "unknown word ':e'")
  }

}
61 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/test/scala/com/netflix/atlas/aggregator/AppConfigurationSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.netflix.spectator.api.NoopRegistry
19 | import com.typesafe.config.ConfigFactory
20 | import munit.FunSuite
21 | import org.springframework.context.annotation.AnnotationConfigApplicationContext
22 |
23 | import scala.util.Using
24 |
class AppConfigurationSuite extends FunSuite {

  // Minimal config with only the uri set; everything else uses defaults.
  private def uriOnlyConfig =
    ConfigFactory.parseString("""
      |netflix.atlas.aggr.registry.atlas.uri = "test"
    """.stripMargin)

  test("aggr service") {
    Using.resource(new AnnotationConfigApplicationContext()) { ctx =>
      ctx.scan("com.netflix")
      ctx.refresh()
      ctx.start()
      assert(ctx.getBean(classOf[AtlasAggregatorService]) != null)
    }
  }

  test("aggr config should use prefix") {
    val aggr = new AggrConfig(uriOnlyConfig, new NoopRegistry, null)
    assertEquals(aggr.uri(), "test")
  }

  test("aggr config should use default for missing props") {
    val aggr = new AggrConfig(uriOnlyConfig, new NoopRegistry, null)
    assertEquals(aggr.batchSize(), 10000)
  }
}
52 |
--------------------------------------------------------------------------------
/iep-lwc-bridge/src/main/scala/com/netflix/iep/lwc/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc
17 |
18 | import com.netflix.iep.config.DynamicConfigManager
19 | import org.apache.pekko.actor.ActorSystem
20 | import com.netflix.spectator.api.NoopRegistry
21 | import com.netflix.spectator.api.Registry
22 | import com.typesafe.config.Config
23 | import com.typesafe.config.ConfigFactory
24 | import org.springframework.context.annotation.Bean
25 | import org.springframework.context.annotation.Configuration
26 |
27 | import java.util.Optional
28 |
/** Spring wiring for the LWC bridge beans. */
@Configuration
class AppConfiguration {

  /**
    * Evaluator for subscription expressions. Missing optional dependencies
    * fall back to a freshly-loaded config manager and a no-op registry.
    */
  @Bean
  def expressionsEvaluator(
    config: Optional[DynamicConfigManager],
    registry: Optional[Registry]
  ): ExpressionsEvaluator = {
    val cfgManager = config.orElseGet(() => DynamicConfigManager.create(ConfigFactory.load()))
    val reg = registry.orElseGet(() => new NoopRegistry)
    new ExpressionsEvaluator(cfgManager, reg)
  }

  /**
    * Service that keeps the evaluator's expression set up to date. Missing
    * optional dependencies fall back to defaults.
    */
  @Bean
  def exprUpdateService(
    config: Optional[Config],
    registry: Optional[Registry],
    evaluator: ExpressionsEvaluator,
    system: ActorSystem
  ): ExprUpdateService = {
    val cfg = config.orElseGet(() => ConfigFactory.load())
    val reg = registry.orElseGet(() => new NoopRegistry)
    new ExprUpdateService(cfg, reg, evaluator, system)
  }
}
54 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 |
4 | // URI for the druid service
5 | druid {
6 | //uri = "http://localhost:7103/druid/v2"
7 |
8 | // Interval used when fetching metadata about data sources that are available
9 | metadata-interval = 21d
10 |
11 | // Interval used for tag queries
12 | tags-interval = 6h
13 |
14 |     // Maximum size for an intermediate data response. If it is exceeded, then
15 |     // the processing will fail early.
16 | max-data-size = 2g
17 |
18 | // Filter for the set of allowed data sources. Only datasources that match this regex will
19 | // be exposed.
20 | datasource-filter = ".*"
21 |
22 | // Should it ignore failures to fetch metadata for a data source. In these cases that data
23 | // source will not be available to query until it is fixed.
24 | datasource-ignore-metadata-failures = false
25 |
26 | // Normalize rates by default in line with Atlas rates convention
27 | normalize-rates = true
28 | }
29 |
30 | core.model.step = 5s
31 |
32 | pekko {
33 | api-endpoints = ${?atlas.pekko.api-endpoints} [
34 | "com.netflix.atlas.druid.ExplainApi",
35 | "com.netflix.atlas.druid.ForeachApi"
36 | ]
37 |
38 | actors = [
39 | {
40 | name = "deadLetterStats"
41 | class = "com.netflix.atlas.pekko.DeadLetterStatsActor"
42 | },
43 | {
44 | name = "db"
45 | class = "com.netflix.atlas.druid.DruidDatabaseActor"
46 | }
47 | ]
48 | }
49 | }
50 |
51 | pekko.http {
52 |
53 | server.request-timeout = 55s
54 |
55 | host-connection-pool {
56 | max-open-requests = 1024
57 | max-connections = 1024
58 |
59 | idle-timeout = 120s
60 | client.idle-timeout = 30s
61 |
62 | // Adjust the log level for the header logger, druid has invalid ETag headers
63 | // that cause a lot of warnings.
64 | client.parsing.illegal-header-warnings = off
65 |
66 | // https://github.com/akka/akka-http/issues/1836
67 | response-entity-subscription-timeout = 35s
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/atlas-slotting/src/main/java/com/netflix/atlas/slotting/Gzip.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.slotting;
17 |
18 | import java.io.ByteArrayInputStream;
19 | import java.io.ByteArrayOutputStream;
20 | import java.io.IOException;
21 | import java.nio.charset.StandardCharsets;
22 | import java.util.zip.GZIPInputStream;
23 | import java.util.zip.GZIPOutputStream;
24 |
25 | final class Gzip {
26 | private Gzip() {
27 | }
28 |
29 | static byte[] compressString(String str) throws IOException {
30 | return compress(str.getBytes(StandardCharsets.UTF_8));
31 | }
32 |
33 | static String decompressString(byte[] bytes) throws IOException {
34 | return new String(decompress(bytes), StandardCharsets.UTF_8);
35 | }
36 |
37 | private static byte[] compress(byte[] bytes) throws IOException {
38 | ByteArrayOutputStream baos = new ByteArrayOutputStream();
39 |
40 | try (GZIPOutputStream out = new GZIPOutputStream(baos)) {
41 | out.write(bytes);
42 | }
43 |
44 | return baos.toByteArray();
45 | }
46 |
47 | private static byte[] decompress(byte[] bytes) throws IOException {
48 | ByteArrayOutputStream baos = new ByteArrayOutputStream();
49 |
50 | try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(bytes))) {
51 | byte[] buffer = new byte[4096];
52 | int length;
53 | while ((length = in.read(buffer)) > 0) {
54 | baos.write(buffer, 0, length);
55 | }
56 | }
57 |
58 | return baos.toByteArray();
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/scala/com/netflix/atlas/aggregator/CaffeineCache.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.github.benmanes.caffeine.cache.Caffeine
19 | import com.netflix.spectator.atlas.impl.QueryIndex
20 | import com.netflix.spectator.impl.Cache
21 |
/** Cache implementation to use with the query index. */
class CaffeineCache[T] extends Cache[String, java.util.List[QueryIndex[T]]] {

  // Bounded in-memory cache; entries beyond the size limit get evicted.
  private val cache = Caffeine
    .newBuilder()
    .maximumSize(10_000)
    .build[String, java.util.List[QueryIndex[T]]]()

  override def get(key: String): java.util.List[QueryIndex[T]] =
    cache.getIfPresent(key)

  // Not needed for the query index usage, so intentionally unsupported.
  override def peek(key: String): java.util.List[QueryIndex[T]] =
    throw new UnsupportedOperationException()

  override def put(key: String, value: java.util.List[QueryIndex[T]]): Unit =
    cache.put(key, value)

  override def computeIfAbsent(
    key: String,
    f: java.util.function.Function[String, java.util.List[QueryIndex[T]]]
  ): java.util.List[QueryIndex[T]] =
    cache.get(key, f)

  override def clear(): Unit = cache.invalidateAll()

  override def size(): Int = cache.estimatedSize().toInt

  // Snapshot view is not supported; report an empty map.
  override def asMap(): java.util.Map[String, java.util.List[QueryIndex[T]]] =
    java.util.Collections.emptyMap()
}
61 |
--------------------------------------------------------------------------------
/iep-lwc-cloudwatch/src/main/scala/com/netflix/iep/lwc/StatsApi.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc
17 |
18 | import org.apache.pekko.http.scaladsl.model.HttpEntity
19 | import org.apache.pekko.http.scaladsl.model.HttpResponse
20 | import org.apache.pekko.http.scaladsl.model.MediaTypes
21 | import org.apache.pekko.http.scaladsl.model.StatusCodes
22 | import org.apache.pekko.http.scaladsl.server.Directives.*
23 | import org.apache.pekko.http.scaladsl.server.Route
24 | import com.netflix.atlas.pekko.CustomDirectives.*
25 | import com.netflix.atlas.pekko.WebApi
26 | import com.netflix.atlas.json.Json
27 |
/**
 * Dump the stats for the expressions flowing through the bridge.
 */
class StatsApi(configStats: ConfigStats) extends WebApi {

  override def routes: Route = {
    endpointPathPrefix("api" / "v1" / "stats") {
      pathEndOrSingleSlash {
        get {
          complete(jsonResponse(Json.encode(configStats.snapshot)))
        }
      } ~
      path(Remaining) { cluster =>
        get {
          complete(jsonResponse(Json.encode(configStats.snapshotForCluster(cluster))))
        }
      }
    }
  }

  /** Wrap pre-encoded JSON in an OK response with the JSON media type. */
  private def jsonResponse(json: String): HttpResponse = {
    val entity = HttpEntity(MediaTypes.`application/json`, json)
    HttpResponse(StatusCodes.OK, Nil, entity)
  }
}
54 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/scala/com/netflix/atlas/persistence/S3CopyUtils.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
18 | import java.io.File
19 | import java.security.MessageDigest
20 |
/** Helpers for deciding which local files to copy to S3 and for naming keys. */
object S3CopyUtils {

  /**
    * A file is inactive when it has not been modified within `maxInactiveMs`.
    * Files reporting no modification time (`lastModified == 0`, e.g. the file
    * no longer exists) are never treated as inactive.
    */
  def isInactive(f: File, maxInactiveMs: Long, now: Long = System.currentTimeMillis): Boolean = {
    val lastModified = f.lastModified()
    lastModified != 0 && now > lastModified + maxInactiveMs
  }

  /**
    * Whether a file should be copied: skip files still actively written, and
    * only pick up tmp files once they have gone inactive.
    */
  def shouldProcess(
    file: File,
    activeFiles: Set[String],
    maxInactiveMs: Long,
    isTmpFile: String => Boolean = FileUtil.isTmpFile
  ): Boolean = {
    if (activeFiles.contains(file.getName)) false
    else if (isTmpFile(file.getName)) isInactive(file, maxInactiveMs)
    else true
  }

  /**
    * Build the S3 object key for a local file. The file name is expected to
    * start with an hour string of `hourLen` characters followed by a
    * separator character.
    */
  def buildS3Key(
    fileName: String,
    prefix: String,
    hourLen: Int = HourlyRollingWriter.HourStringLen
  ): String = {
    val hour = fileName.substring(0, hourLen)
    val s3FileName = fileName.substring(hourLen + 1)
    val hourPath = hash(s"$prefix/$hour")
    val startMinute = S3CopySink.extractMinuteRange(fileName)
    s"$hourPath/$startMinute/$s3FileName"
  }

  /**
    * Prefix the path with 3 hex characters derived from its MD5 digest. The
    * prefix is deterministic for a given path; it spreads keys across S3
    * partitions.
    */
  def hash(path: String): String = {
    val md = MessageDigest.getInstance("MD5")
    md.update(path.getBytes("UTF-8"))
    val hexBytes = md.digest().take(2).map("%02x".format(_)).mkString
    val hashPrefix = hexBytes.take(3)
    s"$hashPrefix/$path"
  }
}
62 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/test/scala/com/netflix/atlas/cloudwatch/ConfigAccountSupplierSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.typesafe.config.ConfigException.Missing
19 | import com.typesafe.config.ConfigFactory
20 | import munit.FunSuite
21 | import software.amazon.awssdk.regions.Region
22 |
class ConfigAccountSupplierSuite extends FunSuite {

  val rules = new CloudWatchRules(ConfigFactory.load())

  /** Expected region map for an account that uses the default regions. */
  private def defaultRegionMap(accts: ConfigAccountSupplier) =
    accts.defaultRegions.map(_ -> accts.namespaces).toMap

  test("test config") {
    val accts = new ConfigAccountSupplier(ConfigFactory.load(), rules)
    val map = accts.accounts
    assertEquals(map.size, 3)
    assertEquals(map("000000000001"), defaultRegionMap(accts))
    assertEquals(map("000000000002"), defaultRegionMap(accts))
    assertEquals(map("000000000003"), Map(Region.US_EAST_1 -> accts.namespaces))
  }

  test("empty accounts") {
    val cfg = ConfigFactory.parseString(
      """
        |atlas.cloudwatch.account.polling {
        |  default-regions = ["us-east-1"]
        |  accounts = []
        |}
      """.stripMargin
    )
    assert(new ConfigAccountSupplier(cfg, rules).accounts.isEmpty)
  }

  test("missing accounts") {
    val cfg = ConfigFactory.parseString(
      """
        |atlas.cloudwatch.account.polling {
        |  default-regions = ["us-east-1"]
        |}
      """.stripMargin
    )
    intercept[Missing] {
      new ConfigAccountSupplier(cfg, rules)
    }
  }
}
59 |
--------------------------------------------------------------------------------
/atlas-persistence/src/main/scala/com/netflix/atlas/persistence/PersistenceApi.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
18 | import org.apache.pekko.http.scaladsl.model.HttpResponse
19 | import org.apache.pekko.http.scaladsl.model.StatusCodes
20 | import org.apache.pekko.http.scaladsl.server.Directives.*
21 | import org.apache.pekko.http.scaladsl.server.Route
22 | import com.netflix.atlas.pekko.CustomDirectives.*
23 | import com.netflix.atlas.pekko.DiagnosticMessage
24 | import com.netflix.atlas.pekko.WebApi
25 | import com.netflix.atlas.core.model.Datapoint
26 | import com.netflix.atlas.webapi.PublishPayloads
27 |
/**
 * Accepts publish payloads and hands them to the local file persistence
 * service. Several legacy and current paths are handled identically.
 */
class PersistenceApi(localFileService: LocalFilePersistService) extends WebApi {

  override def routes: Route = {
    post {
      endpointPath("api" / "v1" / "persistence") {
        handleReq
      } ~ endpointPath("api" / "v1" / "publish") {
        handleReq
      } ~ endpointPath("api" / "v1" / "publish-fast") {
        // Legacy path from when there was more than one publish mode
        handleReq
      }
    }
  }

  private def handleReq: Route = {
    parseEntity(customJson(p => PublishPayloads.decodeBatchDatapoints(p))) {
      case Nil => complete(DiagnosticMessage.error(StatusCodes.BadRequest, "empty payload"))
      case dps =>
        // Fix: previously matched `dps: List[Datapoint]`, an unchecked
        // erasure-based type pattern. The decoder already fixes the element
        // type, so a plain binding is both safe and warning-free.
        if (localFileService.isHealthy) {
          localFileService.persist(dps)
          complete(HttpResponse(StatusCodes.OK))
        } else {
          complete(DiagnosticMessage.error(StatusCodes.ServiceUnavailable, "service unhealthy"))
        }
    }
  }
}
56 |
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/TimerSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import com.netflix.iep.lwc.fwd.admin.Timer.*
19 | import com.netflix.spectator.api.ManualClock
20 | import munit.FunSuite
21 |
class TimerSuite extends FunSuite {

  test("Measure time for calls that succeed") {
    val clock = new ManualClock()
    clock.setMonotonicTime(1)

    val recorder = new TestTimer()

    // Advancing the clock inside the task yields a measured duration of 1.
    def task(): String = {
      clock.setMonotonicTime(2)
      "done"
    }

    assert(measure(task(), "workTimer", clock, recorder.record) == "done")
    assert(recorder.name == "workTimer")
    assert(recorder.tags.isEmpty)
    assert(recorder.duration == 1)
  }

  test("Measure time for calls that fail") {
    val clock = new ManualClock()
    clock.setMonotonicTime(1)

    val recorder = new TestTimer()

    def task(): String = {
      clock.setMonotonicTime(2)
      throw new RuntimeException("failed")
    }

    intercept[RuntimeException](
      measure(task(), "workTimer", clock, recorder.record)
    )
    assert(recorder.name == "workTimer")
    assertEquals(recorder.tags, List("exception", "RuntimeException"))
    assert(recorder.duration == 1)
  }
}
63 |
/** Captures the most recent recorded measurement so tests can assert on it. */
class TestTimer(var name: String = "", var tags: List[String] = Nil, var duration: Long = -1) {

  /** Store the given measurement, replacing whatever was recorded before. */
  def record(name: String, tags: List[String], duration: Long): Unit = {
    this.name = name
    this.tags = tags
    this.duration = duration
  }
}
72 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/ConfigAccountSupplier.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.typesafe.config.Config
19 | import com.typesafe.scalalogging.StrictLogging
20 | import software.amazon.awssdk.regions.Region
21 |
22 | import scala.jdk.CollectionConverters.CollectionHasAsScala
23 |
24 | /**
25 | * Simple Typesafe config based AWS account supplier. Used for testing and in place of a supplier using internal tooling.
26 | */
/**
 * Simple Typesafe config based AWS account supplier. Used for testing and in place of a supplier using internal tooling.
 */
class ConfigAccountSupplier(
    config: Config,
    rules: CloudWatchRules
) extends AwsAccountSupplier
    with StrictLogging {

  // All namespaces that have at least one rule configured; every polled region
  // is mapped to this full set.
  private[cloudwatch] val namespaces = rules.rules.keySet

  // Regions used for accounts that do not list any explicitly. Falls back to the
  // region of the current deployment via the NETFLIX_REGION environment variable.
  private[cloudwatch] val defaultRegions = {
    val path = "atlas.cloudwatch.account.polling.default-regions"
    if (config.hasPath(path))
      config.getStringList(path).asScala.map(Region.of).toList
    else
      List(Region.of(System.getenv("NETFLIX_REGION")))
  }

  private val map = {
    val entries = config.getConfigList("atlas.cloudwatch.account.polling.accounts").asScala
    entries.map { entry =>
      val regions =
        if (entry.hasPath("regions"))
          entry.getStringList("regions").asScala.map(Region.of).toList
        else defaultRegions
      entry.getString("account") -> regions.map(_ -> namespaces).toMap
    }.toMap
  }
  logger.debug(s"Loaded accounts: ${map}")

  /**
    * @return The non-null list of account IDs to poll for CloudWatch metrics.
    */
  override val accounts: Map[String, Map[Region, Set[String]]] = map
}
63 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/NetflixTagger.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.netflix.frigga.Names
19 | import com.typesafe.config.Config
20 | import software.amazon.awssdk.services.cloudwatch.model.Dimension
21 |
22 | /**
23 | * Tag the datapoints using Frigga to extract app and cluster information based
24 | * on naming conventions used by Spinnaker and Asgard.
25 | */
/**
 * Tag the datapoints using Frigga to extract app and cluster information based
 * on naming conventions used by Spinnaker and Asgard.
 */
class NetflixTagger(config: Config) extends DefaultTagger(config) {

  import scala.jdk.CollectionConverters.*

  // Dimension keys whose values follow Netflix naming conventions and should be
  // parsed with Frigga to derive nf.app / nf.cluster / nf.stack tags.
  private val keys = config.getStringList("netflix-keys").asScala

  // Wraps a non-empty string as an optional tag pair; None for null or "".
  private def opt(k: String, s: String): Option[(String, String)] = {
    Option(s).filter(_ != "").map(v => k -> v)
  }

  override def apply(dimensions: List[Dimension]): Map[String, String] = {
    val baseTags = super.apply(dimensions)
    val resultTags = scala.collection.mutable.Map[String, String]()

    keys.foreach { k =>
      baseTags.get(k) match {
        case Some(v) =>
          // Parse the value (e.g. an ASG or cluster name) and add any non-empty parts.
          val name = Names.parseName(v)
          resultTags ++= List(
            opt("nf.app", name.getApp),
            opt("nf.cluster", name.getCluster),
            opt("nf.stack", name.getStack)
          ).flatten
        case None =>
          // Only add default values if the keys are not already set
          if (!resultTags.contains("nf.app")) resultTags("nf.app") = "cloudwatch"
          if (!resultTags.contains("nf.cluster")) resultTags("nf.cluster") = "cloudwatch"
      }
    }

    // NOTE(review): `++` gives priority to the right-hand operand, so any keys
    // already present in baseTags win over the Frigga-derived/default values
    // computed above. The previous comment claimed the opposite ("priority to
    // resultTags") and was incorrect — confirm this precedence is intended.
    resultTags.toMap ++ baseTags
  }
}
60 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/scala/com/netflix/atlas/aggregator/FailureMessage.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.netflix.atlas.pekko.DiagnosticMessage
19 | import com.netflix.atlas.core.validation.ValidationResult
20 | import com.netflix.atlas.json.Json
21 | import com.netflix.atlas.json.JsonSupport
22 |
23 | /**
24 | * Message returned to the user if some of the datapoints sent fail validation rules.
25 | *
26 | * @param `type`
27 | * Indicates whether the payload was an entirely or just partially impacted. If
28 | * all datapoints failed, then it will return an error.
29 | * @param errorCount
30 | * The number of failed datapoints in the payload.
31 | * @param message
32 | * Sampled set of failure messages to help the user debug.
33 | */
case class FailureMessage(`type`: String, errorCount: Int, message: List[String])
    extends JsonSupport {

  // Convenience accessor so callers can read the `type` field without backtick syntax.
  def typeName: String = `type`
}
39 |
object FailureMessage {

  /**
    * Build a failure message from the validation results. Only failures are
    * considered and at most five are sampled into the summary to keep the
    * response (and the cost of encoding tags) bounded.
    */
  private def createMessage(
    level: String,
    message: List[ValidationResult],
    n: Int
  ): FailureMessage = {
    val summary = message
      .collect { case f: ValidationResult.Fail => f }
      .take(5) // limit encoding the tags to just the summary set
      .map(f => s"${f.reason} (tags=${Json.encode(f.tags)})")
    new FailureMessage(level, n, summary)
  }

  /** All datapoints in the payload failed validation. */
  def error(message: List[ValidationResult], n: Int): FailureMessage =
    createMessage(DiagnosticMessage.Error, message, n)

  /** Only some datapoints in the payload failed validation. */
  def partial(message: List[ValidationResult], n: Int): FailureMessage =
    createMessage("partial", message, n)
}
65 |
--------------------------------------------------------------------------------
/atlas-slotting/src/main/scala/com/netflix/atlas/slotting/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.slotting
17 |
18 | import com.netflix.iep.aws2.AwsClientFactory
19 | import com.netflix.spectator.api.NoopRegistry
20 | import com.netflix.spectator.api.Registry
21 | import com.typesafe.config.Config
22 | import com.typesafe.config.ConfigFactory
23 | import org.springframework.context.annotation.Bean
24 | import org.springframework.context.annotation.Configuration
25 | import software.amazon.awssdk.services.autoscaling.AutoScalingClient
26 | import software.amazon.awssdk.services.dynamodb.DynamoDbClient
27 | import software.amazon.awssdk.services.ec2.Ec2Client
28 |
29 | import java.util.Optional
30 |
@Configuration
class AppConfiguration {

  /** Cache of slot assignments shared between the service and its readers. */
  @Bean
  def slottingCache(): SlottingCache = new SlottingCache()

  /**
    * Main service bean. Config and registry are optional so the application can
    * fall back to defaults (reference config, no-op metrics) when they are not
    * provided by the context.
    */
  @Bean
  def slottingService(
    config: Optional[Config],
    registry: Optional[Registry],
    asgClient: AutoScalingClient,
    ddbClient: DynamoDbClient,
    ec2Client: Ec2Client,
    cache: SlottingCache
  ): SlottingService = {
    val cfg = config.orElseGet(() => ConfigFactory.load())
    val reg = registry.orElseGet(() => new NoopRegistry)
    new SlottingService(cfg, reg, asgClient, ddbClient, ec2Client, cache)
  }

  /** DynamoDB client configured via the shared AWS client factory. */
  @Bean
  def dynamoDb(factory: AwsClientFactory): DynamoDbClient =
    factory.newInstance(classOf[DynamoDbClient])

  /** EC2 client configured via the shared AWS client factory. */
  @Bean
  def ec2(factory: AwsClientFactory): Ec2Client =
    factory.newInstance(classOf[Ec2Client])

  /** AutoScaling client configured via the shared AWS client factory. */
  @Bean
  def autoScaling(factory: AwsClientFactory): AutoScalingClient =
    factory.newInstance(classOf[AutoScalingClient])
}
68 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/test/scala/com/netflix/atlas/util/XXHasherSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.util
17 |
18 | import munit.FunSuite
19 |
class XXHasherSuite extends FunSuite {

  // Shorthand for encoding test strings as UTF-8 bytes.
  private def utf8(s: String): Array[Byte] = s.getBytes("UTF-8")

  test("hash byte array") {
    assertEquals(XXHasher.hash(utf8("Hello World")), 7148569436472236994L)
  }

  test("hash byte array empty") {
    assertEquals(XXHasher.hash(new Array[Byte](0)), -1205034819632174695L)
  }

  test("hash byte array null") {
    intercept[NullPointerException] {
      XXHasher.hash(null.asInstanceOf[Array[Byte]])
    }
  }

  test("hash byte array offset") {
    assertEquals(XXHasher.hash(utf8("Hello World"), 1, 5), -4877171975935371781L)
  }

  test("hash byte string") {
    assertEquals(XXHasher.hash("Hello World"), -7682288509216370722L)
  }

  test("updateHash byte array") {
    val seed = XXHasher.hash(utf8("Hello World"))
    assertEquals(XXHasher.updateHash(seed, utf8(" from Atlas!")), 6820347041909772079L)
  }

  test("updateHash byte array offset") {
    val seed = XXHasher.hash(utf8("Hello World"))
    assertEquals(
      XXHasher.updateHash(seed, utf8(" from Atlas!"), 1, 8),
      -2548206119163337765L
    )
  }

  test("updateHash string") {
    val seed = XXHasher.hash("Hello World")
    assertEquals(XXHasher.updateHash(seed, " from Atlas!"), -5099163101293074982L)
  }

  test("combineHashes") {
    // Combining two hashes is expected to match updating the first with the
    // second value's content (same result as the "updateHash string" case).
    val first = XXHasher.hash("Hello World")
    val second = XXHasher.hash(" from Atlas!")
    assertEquals(XXHasher.combineHashes(first, second), -5099163101293074982L)
  }
}
68 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/test/resources/application.conf:
--------------------------------------------------------------------------------
atlas {

  poller {

    # Set to a large value because we don't want it running during tests
    frequency = 60 minutes

    pollers = [
      {
        name = "cloudwatch"
        class = "com.netflix.atlas.cloudwatch.CloudWatchPoller"
      }
    ]
  }

  cloudwatch {
    testMode = false
    # Category definitions live in test-rules.conf (included at the bottom of
    # this file). The optional substitution lets an externally-provided list be
    # prepended without editing this file.
    categories = ${?atlas.cloudwatch.categories} [
      "ut1",
      "ut5",
      "ut-ec2",
      "ut-node",
      "ut-asg",
      "ut-timeout",
      "ut-offset",
      "ut-mono",
      "ut-redis",
      "ut-daily",
      "queryfilter",
      "nswithoutdetails",
      "nswithdetails"
    ]

    account {
      polling = {
        requestLimit = 300 // per second request limit for AWS.
        fastPolling = [
          "000000000001",
          "000000000002"
        ]
        # Regions used for accounts that do not list any explicitly.
        default-regions = ["us-east-1", "us-west-2"]
        accounts = [
          {
            account = "000000000001"
          },
          {
            account = "000000000002"
          },
          {
            # Overrides default-regions for this account only.
            account = "000000000003"
            regions = ["us-east-1"]
          }
        ]
      }
      routing {
        # NOTE(review): ${STACK}/${REGION} are inside quoted strings, so HOCON
        # does not substitute them — presumably expanded by the application at
        # runtime; confirm against the routing code.
        uri = "https://publish-${STACK}.${REGION}.foo.com/api/v1/publish"
        config-uri = "https://lwc-${STACK}.${REGION}.foo.com/api/v1/expressions"
        eval-uri = "https://lwc-${STACK}.${REGION}.foo.com/api/v1/evaluate"
        default = "main"
        routes = [
          {
            stack = "stackA"
            accounts = [
              {
                account = "1"
                routing = {
                  "us-west-1" = "us-west-1"
                }
              },
              {
                account = "2"
              }
            ]
          },
          {
            stack = "stackB"
            accounts = [
              {
                account = "3"
              }
            ]
          }
        ]
      }
    }

    tagger = {
      # Tags applied to every datapoint in addition to those from dimensions.
      common-tags = [
        {
          key = "nf.region"
          value = "us-west-2"
        }
      ]
    }
  }

}

include "test-rules.conf"
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/test/scala/com/netflix/iep/lwc/fwd/admin/ValidationSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import com.netflix.atlas.core.model.StyleExpr
19 | import com.netflix.iep.lwc.fwd.cw.ForwardingDimension
20 | import com.netflix.iep.lwc.fwd.cw.ForwardingExpression
21 | import munit.FunSuite
22 |
class ValidationSuite extends FunSuite with TestAssertions with CwForwardingTestConfig {

  // Validation whose check always throws, used to observe whether it is run.
  private def failingValidation(name: String, required: Boolean): Validation =
    Validation(
      name,
      required,
      (_, _) => throw new IllegalArgumentException("Validation failed")
    )

  // Minimal expression fixture; contents are irrelevant for these tests.
  private def emptyExpression: ForwardingExpression =
    ForwardingExpression("", "", None, "", List.empty[ForwardingDimension])

  test("Perform a required validation") {
    assertFailure(
      failingValidation("RequiredCheck", true).validate(
        makeConfig(),
        emptyExpression,
        List.empty[StyleExpr]
      ),
      "Validation failed"
    )
  }

  test("Perform an optional validation") {
    // An optional check still runs when it is not listed in checksToSkip.
    assertFailure(
      failingValidation("OptionalCheck", false).validate(
        makeConfig(),
        emptyExpression,
        List.empty[StyleExpr]
      ),
      "Validation failed"
    )
  }

  test("Skip an optional validation") {
    // Listed in checksToSkip, so the failing check must never be invoked.
    failingValidation("OptionalCheck", false).validate(
      makeConfig(checksToSkip = List("OptionalCheck")),
      emptyExpression,
      List.empty[StyleExpr]
    )
  }

}
77 |
--------------------------------------------------------------------------------
/iep-archaius/src/main/scala/com/netflix/iep/archaius/PropertiesLoader.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.archaius
17 |
18 | import org.apache.pekko.actor.Actor
19 | import com.netflix.atlas.json.Json
20 | import com.typesafe.config.Config
21 | import com.typesafe.scalalogging.StrictLogging
22 | import software.amazon.awssdk.services.dynamodb.model.ScanRequest
23 |
24 | import scala.util.Failure
25 | import scala.util.Success
26 |
27 | /**
28 | * Actor for loading properties from dynamodb. Properties will get updated in the provided
29 | * `PropertiesContext`.
30 | */
/**
 * Actor for loading properties from dynamodb. Properties will get updated in the provided
 * `PropertiesContext`.
 */
class PropertiesLoader(config: Config, propContext: PropertiesContext, dynamoService: DynamoService)
    extends Actor
    with StrictLogging {

  private val table = config.getString("netflix.iep.archaius.table")

  import scala.jdk.StreamConverters.*

  import scala.concurrent.duration.*

  // Use the actor's configured dispatcher for the scheduled ticks and future
  // callbacks instead of ExecutionContext.Implicits.global. This keeps the work
  // on the pool configured for this actor system rather than leaking onto the
  // JVM-wide global pool.
  import context.dispatcher
  context.system.scheduler.scheduleAtFixedRate(5.seconds, 5.seconds, self, PropertiesLoader.Tick)

  def receive: Receive = {
    case PropertiesLoader.Tick =>
      // Scan the whole table; the paginator follows continuation keys transparently.
      val future = dynamoService.execute { client =>
        val request = ScanRequest.builder().tableName(table).build()
        client
          .scanPaginator(request)
          .items()
          .stream()
          .toScala(List)
          .flatMap(process)
      }

      // Failures are logged and the stale properties are kept until the next tick.
      future.onComplete {
        case Success(vs) => propContext.update(vs)
        case Failure(t)  => logger.error("failed to refresh properties from dynamodb", t)
      }
  }

  // Decode the json "data" attribute of a row into a Property, if present.
  private def process(item: AttrMap): Option[PropertiesApi.Property] = {
    Option(item.get("data")).map(v => Json.decode[PropertiesApi.Property](v.s()))
  }
}
65 |
object PropertiesLoader {
  /** Message sent to self on a fixed schedule to trigger a refresh from DynamoDB. */
  case object Tick
}
69 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/api-gateway.conf:
--------------------------------------------------------------------------------
1 |
atlas {
  cloudwatch {

    // https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html
    // Stage-level aggregate metrics. 4XXError and 5XXError share the
    // aws.apigateway.errors alias and are distinguished by the status tag.
    api-gateway = {
      namespace = "AWS/ApiGateway"
      period = 1m
      end-period-offset = 3

      dimensions = [
        "ApiName",
        "Stage"
      ]

      metrics = [
        {
          name = "Count"
          alias = "aws.apigateway.requests"
          conversion = "sum,rate"
        },
        {
          name = "4XXError"
          alias = "aws.apigateway.errors"
          conversion = "sum,rate"
          tags = [
            {
              key = "status"
              value = "4xx"
            }
          ]
        },
        {
          name = "5XXError"
          alias = "aws.apigateway.errors"
          conversion = "sum,rate"
          tags = [
            {
              key = "status"
              value = "5xx"
            }
          ]
        },
        {
          name = "Latency"
          alias = "aws.apigateway.latency"
          conversion = "timer-millis"
        }
      ]
    }

    // Same metric set broken out per Method/Resource for finer granularity.
    // The aliases intentionally match the aggregate block above.
    api-gateway-detail = {
      namespace = "AWS/ApiGateway"
      period = 1m

      dimensions = [
        "ApiName",
        "Method",
        "Resource",
        "Stage"
      ]

      metrics = [
        {
          name = "Count"
          alias = "aws.apigateway.requests"
          conversion = "sum,rate"
        },
        {
          name = "4XXError"
          alias = "aws.apigateway.errors"
          conversion = "sum,rate"
          tags = [
            {
              key = "status"
              value = "4xx"
            }
          ]
        },
        {
          name = "5XXError"
          alias = "aws.apigateway.errors"
          conversion = "sum,rate"
          tags = [
            {
              key = "status"
              value = "5xx"
            }
          ]
        },
        {
          name = "Latency"
          alias = "aws.apigateway.latency"
          conversion = "timer-millis"
        }
      ]
    }
  }

}
101 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/sqs.conf:
--------------------------------------------------------------------------------
1 |
atlas {
  cloudwatch {

    // http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQS_metricscollected.html
    // Queue depth metrics share the aws.sqs.approximateNumberOfMessages alias
    // and are distinguished by the id tag (delayed / not-visible / visible).
    sqs = {
      namespace = "AWS/SQS"
      period = 1m
      end-period-offset = 4

      dimensions = [
        "QueueName"
      ]

      metrics = [
        {
          name = "ApproximateNumberOfMessagesDelayed"
          alias = "aws.sqs.approximateNumberOfMessages"
          conversion = "max"
          tags = [
            {
              key = "id"
              value = "delayed"
            }
          ]
        },
        {
          name = "ApproximateNumberOfMessagesNotVisible"
          alias = "aws.sqs.approximateNumberOfMessages"
          conversion = "max"
          tags = [
            {
              key = "id"
              value = "not-visible"
            }
          ]
        },
        {
          name = "ApproximateNumberOfMessagesVisible"
          alias = "aws.sqs.approximateNumberOfMessages"
          conversion = "max"
          tags = [
            {
              key = "id"
              value = "visible"
            }
          ]
        },
        {
          name = "ApproximateAgeOfOldestMessage"
          alias = "aws.sqs.approximateAgeOfOldestMessage"
          conversion = "max"
        },
        {
          name = "NumberOfMessagesSent"
          alias = "aws.sqs.messagesSent"
          conversion = "sum,rate"
        },
        {
          name = "NumberOfMessagesReceived"
          alias = "aws.sqs.messagesReceived"
          conversion = "sum,rate"
        },
        {
          name = "NumberOfMessagesDeleted"
          alias = "aws.sqs.messagesDeleted"
          conversion = "sum,rate"
        },
        {
          name = "NumberOfEmptyReceives"
          alias = "aws.sqs.emptyReceives"
          conversion = "sum,rate"
        }
      ]
    }

    // NOTE(review): the much larger end-period-offset here presumably accounts
    // for this metric being reported with a long delay by CloudWatch — confirm.
    sqs-msg-size = {
      namespace = "AWS/SQS"
      period = 1m
      end-period-offset = 155

      dimensions = [
        "QueueName"
      ]

      metrics = [
        {
          name = "SentMessageSize"
          alias = "aws.sqs.messageSize"
          conversion = "dist-summary"
        }
      ]
    }
  }

}
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/MetricData.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.netflix.atlas.cloudwatch.MetricData.DatapointNaN
19 | import software.amazon.awssdk.services.cloudwatch.model.Datapoint
20 | import software.amazon.awssdk.services.cloudwatch.model.StandardUnit
21 |
22 | import java.time.Instant
23 |
case class MetricData(
    meta: MetricMetadata,
    previous: Option[Datapoint],
    current: Option[Datapoint],
    lastReportedTimestamp: Option[Instant]
) {

  /**
    * Datapoint to publish for this interval. Monotonic counters are converted to
    * a non-negative delta against the previous sample; everything else passes
    * through the current value, or NaN when no value was received.
    */
  def datapoint: Datapoint = {
    if (!meta.definition.monotonicValue) {
      // Report NaN to better align with CloudWatch where values will be missing.
      // Timeouts were rarely used.
      current.getOrElse(DatapointNaN)
    } else {
      previous match {
        case None => DatapointNaN
        case Some(p) =>
          // For a monotonic counter, use the max statistic. These will typically have a
          // single reporting source that maintains the state over time. If the sample count
          // is larger than one, it will be a spike due to the reporter sending the value
          // multiple times within that interval. The max will allow us to ignore those
          // spikes and get the last written value.
          val c = current.getOrElse(DatapointNaN)
          val delta = math.max(c.maximum - p.maximum, 0.0)
          Datapoint
            .builder()
            .minimum(delta)
            .maximum(delta)
            .sum(delta)
            .sampleCount(c.sampleCount)
            .timestamp(c.timestamp)
            .unit(c.unit)
            .build()
      }
    }
  }
}
58 |
object MetricData {

  // Shared placeholder for missing data: all statistics are NaN. Note that the
  // timestamp is captured once when this object is initialized (class-load
  // time), not per use.
  private val DatapointNaN = Datapoint
    .builder()
    .minimum(Double.NaN)
    .maximum(Double.NaN)
    .sum(Double.NaN)
    .sampleCount(Double.NaN)
    .timestamp(Instant.now())
    .unit(StandardUnit.NONE)
    .build()

}
72 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/webapi/RequestId.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.webapi
17 |
18 | import org.apache.pekko.http.scaladsl.model.ContentTypes
19 | import org.apache.pekko.http.scaladsl.model.HttpEntity
20 | import com.netflix.atlas.json.Json
21 | import com.netflix.atlas.webapi.RequestId.getByteArrayOutputStream
22 |
23 | import java.io.ByteArrayOutputStream
24 | import scala.util.Using
25 |
26 | /**
27 | * This is the class we have to use to respond to the Firehose publisher. The request ID and timestamp must match
28 | * those of the message received in order for Firehose to consider the data successfully processed.
29 | *
30 | * @param requestId
31 | * The request ID given by AWS Firehose.
32 | * @param timestamp
33 | * The timestamp in unix epoch milliseconds given by AWS Firehose.
34 | * @param exception
35 | * An optional exception to encode if processing failed. This will be stored in Cloud Watch logs.
36 | */
case class RequestId(requestId: String, timestamp: Long, exception: Option[Exception] = None) {

  /**
    * Encode the response entity expected by the Firehose publisher. The requestId
    * and timestamp must echo the incoming message for Firehose to consider the
    * data successfully processed; errorMessage is included only on failure.
    */
  def getEntity: HttpEntity.Strict = {
    // Thread-local buffer, reset per call, to avoid re-allocating per response.
    val stream = getByteArrayOutputStream
    Using.resource(Json.newJsonGenerator(stream)) { json =>
      json.writeStartObject()
      json.writeStringField("requestId", requestId)
      json.writeNumberField("timestamp", timestamp)
      // foreach rather than map: this is purely a side effect and the previous
      // Option[Unit] result was discarded.
      exception.foreach { ex =>
        json.writeStringField("errorMessage", ex.getMessage)
      }
      json.writeEndObject()
    }
    HttpEntity(ContentTypes.`application/json`, stream.toByteArray)
  }

}
54 |
object RequestId {

  // Per-thread reusable buffer so each response serialization does not allocate
  // a fresh stream. withInitial lazily creates the stream on first access.
  private val byteArrayStreams =
    ThreadLocal.withInitial[ByteArrayOutputStream](() => new ByteArrayOutputStream())

  /** Returns this thread's buffer, reset and ready for writing. */
  private[atlas] def getByteArrayOutputStream: ByteArrayOutputStream = {
    val baos = byteArrayStreams.get()
    baos.reset()
    baos
  }
}
70 |
--------------------------------------------------------------------------------
/atlas-stream/src/test/scala/com/netflix/atlas/stream/EvalFlowSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.stream
17 |
18 | import org.apache.pekko.actor.ActorSystem
19 | import org.apache.pekko.stream.scaladsl.Sink
20 | import org.apache.pekko.stream.scaladsl.Source
21 | import com.netflix.atlas.pekko.DiagnosticMessage
22 | import com.netflix.atlas.eval.stream.Evaluator.DataSource
23 | import com.netflix.atlas.eval.stream.Evaluator.DataSources
24 | import com.netflix.atlas.eval.stream.Evaluator.MessageEnvelope
25 | import com.netflix.spectator.api.NoopRegistry
26 | import com.typesafe.config.ConfigFactory
27 | import munit.FunSuite
28 |
29 | import scala.concurrent.Await
30 | import scala.concurrent.duration.Duration
31 |
class EvalFlowSuite extends FunSuite {

  private implicit val system: ActorSystem = ActorSystem(getClass.getSimpleName)
  private val config = ConfigFactory.load
  private val registry = new NoopRegistry()
  private val validateNoop: DataSource => Unit = _ => ()

  private val dataSourceStr =
    """[{"id":"abc", "step": 10, "uri":"http://local-dev/api/v1/graph?q=name,a,:eq"}]"""

  test("register and get message") {

    // Stubbed service that immediately emits a single message and completes the
    // stream for any data-source update.
    val evalService = new EvalService(config, registry, null, system) {
      override def updateDataSources(streamId: String, dataSources: DataSources): Unit = {
        val handler = getStreamInfo(streamId).handler
        handler.offer(new MessageEnvelope("mockId", DiagnosticMessage.info("mockMsg")))
        handler.complete()
      }
    }

    val evalFlow = EvalFlow.createEvalFlow(evalService, DataSourceValidator(10, validateNoop))

    // Removed a stray `Source.single(dataSourceStr).via(evalFlow)` statement that
    // built a stream blueprint but never materialized it — it was dead code.
    val future = Source
      .single(dataSourceStr)
      .via(evalFlow)
      .filter(envelope => envelope.id() != "_") // filter out heartbeat
      .runWith(Sink.head)
    val messageEnvelope = Await.result(future, Duration.Inf)

    assertEquals(messageEnvelope.id(), "mockId")
  }
}
65 |
--------------------------------------------------------------------------------
/atlas-druid/src/main/scala/com/netflix/atlas/druid/ExplainApi.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.druid
17 |
18 | import org.apache.pekko.actor.ActorRefFactory
19 | import org.apache.pekko.http.scaladsl.model.HttpEntity
20 | import org.apache.pekko.http.scaladsl.model.HttpResponse
21 | import org.apache.pekko.http.scaladsl.model.MediaTypes
22 | import org.apache.pekko.http.scaladsl.model.StatusCodes
23 | import org.apache.pekko.http.scaladsl.server.Directives.*
24 | import org.apache.pekko.http.scaladsl.server.Route
25 | import org.apache.pekko.http.scaladsl.server.RouteResult
26 | import org.apache.pekko.pattern.ask
27 | import org.apache.pekko.util.Timeout
28 | import com.netflix.atlas.pekko.CustomDirectives.*
29 | import com.netflix.atlas.pekko.WebApi
30 | import com.netflix.atlas.druid.ExplainApi.ExplainRequest
31 | import com.netflix.atlas.eval.graph.Grapher
32 | import com.netflix.atlas.json.Json
33 | import com.netflix.atlas.webapi.GraphApi.DataRequest
34 | import com.typesafe.config.Config
35 |
36 | import scala.concurrent.ExecutionContext
37 | import scala.concurrent.duration.*
38 |
class ExplainApi(config: Config, implicit val actorRefFactory: ActorRefFactory) extends WebApi {

  /** Parses incoming graph requests into a graph configuration. */
  private val grapher: Grapher = Grapher(config)

  /** Actor that evaluates the data request and produces the explain output. */
  private val dbRef = actorRefFactory.actorSelection("/user/db")

  private implicit val ec: ExecutionContext = actorRefFactory.dispatcher

  override def routes: Route = {
    endpointPath("explain" / "v1" / "graph") {
      get { ctx =>
        // Translate the HTTP request into an explain request for the db actor,
        // then render whatever it returns as a JSON response.
        val request = ExplainRequest(DataRequest(grapher.toGraphConfig(ctx.request)))
        val responseFuture = dbRef.ask(request)(Timeout(10.seconds))
        responseFuture.map { result =>
          val payload = HttpEntity(MediaTypes.`application/json`, Json.encode(result))
          RouteResult.Complete(HttpResponse(StatusCodes.OK, entity = payload))
        }
      }
    }
  }
}
62 |
object ExplainApi {

  /** Message sent to the db actor asking it to explain how a data request is evaluated. */
  case class ExplainRequest(dataRequest: DataRequest)
}
66 |
--------------------------------------------------------------------------------
/project/GitVersion.scala:
--------------------------------------------------------------------------------
1 | import sbt.*
2 | import sbt.Keys.*
3 | import sbtrelease.Version
4 | import com.github.sbt.git.SbtGit.*
5 |
object GitVersion {

  // Base version used for snapshot builds off the main/master branch.
  private val baseVersion = "v0.5.x"

  // Matches a version branch name, e.g.:
  // 0.1.x
  private val versionBranch = """v?([0-9.]+)(?:\.x)?""".r

  // Matches `git describe` output for a commit after a release tag, e.g.:
  // v0.1.47-31-g230560c
  // v0.1.47-20150807.161518-9
  private val snapshotVersion = """v?([0-9.]+)-\d+-[0-9a-z]+""".r

  // Matches `git describe` output based on a release-candidate tag, e.g.:
  // 1.5.0-rc.1-123-gcbfe51a
  // NOTE: also matches plain snapshot describes; in the match below snapshotVersion
  // is tried first, so this case only fires for rc-tagged describes.
  private val candidateVersion = """v?([0-9.]+)(?:-rc\.\d+)?-\d+-[0-9a-z]+""".r

  // Matches an exact release or release-candidate tag, e.g.:
  // v0.1.47
  // 1.5.0-rc.1
  private val releaseVersion = """v?([0-9.]+(?:-rc\.\d+)?)""".r

  /**
   * True if this build is for a pull-request event on GitHub Actions, based on
   * the GITHUB_EVENT_NAME environment variable. Defaults to "unknown" (treated
   * as not a pull request) when the variable is absent.
   */
  private def isPullRequest: Boolean = sys.env.getOrElse("GITHUB_EVENT_NAME", "unknown") == "pull_request"

  /**
   * Bump the last git described version to use for the current snapshot. If it is a version branch
   * and the prefix doesn't match, then it is the first snapshot for the branch so use the branch
   * version to start with.
   */
  private def toSnapshotVersion(branch: String, v: String): String = {
    val v2 = Version(v).map(_.bumpNext.unapply).getOrElse(v)
    val suffix = "-SNAPSHOT"
    branch match {
      case versionBranch(b) if !v2.startsWith(b) =>
        s"${Version(s"$b.0").map(_.unapply).getOrElse(v2)}$suffix"
      case _ =>
        s"$v2$suffix"
    }
  }

  /**
   * Determine the branch name, preferring the GITHUB_REF environment variable and
   * falling back to the supplied default.
   */
  private def extractBranchName(dflt: String): String = {
    val ref = sys.env.getOrElse("GITHUB_REF", dflt)
    // Return last part if there is a '/', e.g. refs/heads/feature-branch-1. For
    // this use-case we only care about master and version branches so it is ok
    // if something like 'feature/branch/1' extracts just the '1'.
    val parts = ref.split("/")
    parts(parts.length - 1)
  }

  // Computes the build version from the git describe output. Pull requests always
  // get a fixed placeholder version so they can never publish a real artifact.
  lazy val settings: Seq[Def.Setting[?]] = Seq(
    ThisBuild / version := {
      val branch = extractBranchName(git.gitCurrentBranch.value)
      val branchVersion = if (branch == "main" || branch == "master") baseVersion else branch
      git.gitDescribedVersion.value.getOrElse("0.1-SNAPSHOT") match {
        case _ if isPullRequest => s"0.0.0-PULLREQUEST"
        case snapshotVersion(v) => toSnapshotVersion(branchVersion, v)
        case candidateVersion(v) => s"$v-SNAPSHOT"
        case releaseVersion(v) => v
        case v => v
      }
    }
  )
}
70 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/scala/com/netflix/atlas/aggregator/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.aggregator
17 |
18 | import com.netflix.iep.admin.EndpointMapping
19 | import com.netflix.iep.admin.endpoints.SpectatorEndpoint
20 | import com.netflix.iep.config.ConfigManager
21 | import com.netflix.iep.config.DynamicConfigManager
22 | import org.apache.pekko.actor.ActorSystem
23 | import com.netflix.spectator.api.Clock
24 | import com.netflix.spectator.api.NoopRegistry
25 | import com.netflix.spectator.api.Registry
26 | import com.typesafe.config.Config
27 | import com.typesafe.config.ConfigFactory
28 | import org.springframework.context.annotation.Bean
29 | import org.springframework.context.annotation.Configuration
30 |
31 | import java.util.Optional
32 |
@Configuration
class AppConfiguration {

  /**
   * Creates the PekkoClient bean. Falls back to a NoopRegistry when no Registry
   * bean is bound in the context.
   */
  @Bean
  def pekkoClient(
    registry: Optional[Registry],
    system: ActorSystem
  ): PekkoClient = {
    val r = registry.orElseGet(() => new NoopRegistry)
    new PekkoClient(r, system)
  }

  /**
   * Creates the aggregator service. Config and registry fall back to defaults
   * (ConfigFactory.load / NoopRegistry) when no beans are bound.
   */
  @Bean
  def atlasAggregatorService(
    config: Optional[Config],
    registry: Optional[Registry],
    client: PekkoClient
  ): AtlasAggregatorService = {
    val c = config.orElseGet(() => ConfigFactory.load())
    val r = registry.orElseGet(() => new NoopRegistry)
    new AtlasAggregatorService(c, Clock.SYSTEM, r, client)
  }

  /**
   * Creates the sharded aggregator service, wrapping the local aggregator service.
   * Falls back to the default dynamic config manager and a NoopRegistry when the
   * corresponding beans are not bound.
   */
  @Bean
  def shardedAggregatorService(
    configManager: Optional[DynamicConfigManager],
    registry: Optional[Registry],
    client: PekkoClient,
    atlasAggregatorService: AtlasAggregatorService
  ): ShardedAggregatorService = {
    val c = configManager.orElseGet(() => ConfigManager.dynamicConfigManager())
    val r = registry.orElseGet(() => new NoopRegistry)
    new ShardedAggregatorService(c, r, client, atlasAggregatorService)
  }

  /** Exposes the aggregator's registry on the /aggregates admin endpoint. */
  @Bean
  def aggrRegistryEndpoint(service: AtlasAggregatorService): EndpointMapping = {
    new EndpointMapping("/aggregates", new SpectatorEndpoint(service.atlasRegistry))
  }
}
73 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/msk-serverless.conf:
--------------------------------------------------------------------------------
1 | atlas {
2 | cloudwatch {
3 |
4 | // https://docs.aws.amazon.com/msk/latest/developerguide/serverless-monitoring.html
5 | msk-serverless-cluster = {
6 | namespace = "AWS/Kafka"
7 | period = 1m
8 | end-period-offset = 5
9 |
10 | dimensions = [
11 | "Cluster Name",
12 | "Topic"
13 | ]
14 |
15 | metrics = [
16 | {
17 | name = "BytesInPerSec"
18 | alias = "aws.mskserverless.bytes"
19 | conversion = "sum"
20 | tags = [
21 | {
22 | key = "id"
23 | value = "in"
24 | }
25 | ]
26 | },
27 | {
28 | name = "BytesOutPerSec"
29 | alias = "aws.mskserverless.bytes"
30 | conversion = "sum"
31 | tags = [
32 | {
33 | key = "id"
34 | value = "out"
35 | }
36 | ]
37 | },
38 | {
39 | name = "FetchMessageConversionsPerSec"
40 | alias = "aws.mskserverless.messageConversions"
41 | conversion = "sum"
42 | tags = [
43 | {
44 | key = "id"
45 | value = "fetch"
46 | }
47 | ]
48 | },
49 | {
50 | name = "ProduceMessageConversionsPerSec"
51 | alias = "aws.mskserverless.messageConversions"
52 | conversion = "sum"
53 | tags = [
54 | {
55 | key = "id"
56 | value = "produce"
57 | }
58 | ]
59 | },
60 | {
61 | name = "MessagesInPerSec"
62 | alias = "aws.mskserverless.messages"
63 | conversion = "sum"
64 | tags = [
65 | {
66 | key = "id"
67 | value = "in"
68 | }
69 | ]
70 | }
71 | ]
72 | }
73 |
74 | msk-serverless-consumergroup = {
75 | namespace = "AWS/Kafka"
76 | period = 1m
77 | end-period-offset = 5
78 |
79 | dimensions = [
80 | "Cluster Name",
81 | "Topic",
82 | "Consumer Group"
83 | ]
84 |
85 | metrics = [
86 | {
87 | name = "EstimatedMaxTimeLag"
88 | alias = "aws.mskserverless.estimatedMaxTimeLag"
89 | conversion = "max"
90 | },
91 | {
92 | name = "MaxOffsetLag"
93 | alias = "aws.mskserverless.maxOffsetLag"
94 | conversion = "max"
95 | },
96 | {
97 | name = "SumOffsetLag"
98 | alias = "aws.mskserverless.sumOffsetLag"
99 | conversion = "max"
100 | }
101 | ]
102 | }
103 | }
104 | }
105 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/scala/com/netflix/atlas/cloudwatch/CloudWatchRules.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.cloudwatch
17 |
18 | import com.typesafe.config.Config
19 |
20 | /**
21 | * Compiles to configurations into a lookup map based on namespace and metric.
22 | *
23 | * @param config
24 | * The non-null config to load the rules from.
25 | */
26 | class CloudWatchRules(config: Config) {
27 |
28 | private val nsMap = {
29 | // ns metric definitions to apply to metric
30 | var ruleMap = Map.empty[String, Map[String, List[(MetricCategory, List[MetricDefinition])]]]
31 | getCategories(config).foreach { category =>
32 | category.metrics.foreach { metricDef =>
33 | var inner = ruleMap.getOrElse(
34 | category.namespace,
35 | Map.empty[String, List[(MetricCategory, List[MetricDefinition])]]
36 | )
37 | // val (_, list) = inner.getOrElse(m.name, List((c, List.empty[MetricDefinition])))
38 | var entry =
39 | inner.getOrElse(metricDef.name, List.empty[(MetricCategory, List[MetricDefinition])])
40 |
41 | var updated = false
42 | for (i <- 0 until entry.size) {
43 | val (cat, defs) = entry(i)
44 | if (cat == category) {
45 | entry = entry.updated(i, (cat, defs :+ metricDef))
46 | updated = true
47 | }
48 | }
49 |
50 | if (!updated) {
51 | entry = entry :+ (category, List(metricDef))
52 | }
53 | inner += metricDef.name -> entry
54 | ruleMap += (category.namespace -> inner)
55 | }
56 | }
57 | ruleMap
58 | }
59 |
60 | // converted to a method for unit testing.
61 | def rules: Map[String, Map[String, List[(MetricCategory, List[MetricDefinition])]]] = nsMap
62 |
63 | private[cloudwatch] def getCategories(config: Config): List[MetricCategory] = {
64 | import scala.jdk.CollectionConverters.*
65 | val categories = config.getStringList("atlas.cloudwatch.categories").asScala.map { name =>
66 | val cfg = config.getConfig(s"atlas.cloudwatch.$name")
67 | MetricCategory.fromConfig(cfg)
68 | }
69 | categories.toList
70 | }
71 |
72 | }
73 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/efs.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/efs-metricscollected.html
6 | efs = {
7 | namespace = "AWS/EFS"
8 | period = 1m
9 | end-period-offset = 9
10 |
11 | dimensions = [
12 | "FileSystemId"
13 | ]
14 |
15 | metrics = [
16 | {
17 | name = "MeteredIOBytes"
18 | alias = "aws.efs.meteredIOBytes"
19 | conversion = "sum,rate"
20 | tags = []
21 | },
22 | {
23 | name = "TotalIOBytes"
24 | alias = "aws.efs.totalIOBytes"
25 | conversion = "sum,rate"
26 | tags = []
27 | },
28 | {
29 | name = "DataReadIOBytes"
30 | alias = "aws.efs.ioThroughput"
31 | conversion = "sum,rate"
32 | tags = [
33 | {
34 | key = "id"
35 | value = "read"
36 | }
37 | ]
38 | },
39 | {
40 | name = "DataWriteIOBytes"
41 | alias = "aws.efs.ioThroughput"
42 | conversion = "sum,rate"
43 | tags = [
44 | {
45 | key = "id"
46 | value = "write"
47 | }
48 | ]
49 | },
50 | {
51 | name = "MetadataIOBytes"
52 | alias = "aws.efs.ioThroughput"
53 | conversion = "sum,rate"
54 | tags = [
55 | {
56 | key = "id"
57 | value = "metadata"
58 | }
59 | ]
60 | },
61 | {
62 | name = "PermittedThroughput"
63 | alias = "aws.efs.permittedThroughput"
64 | conversion = "max"
65 | tags = []
66 | },
67 | {
68 | name = "BurstCreditBalance"
69 | alias = "aws.efs.burstCreditBalance"
70 | conversion = "max"
71 | tags = []
72 | },
73 | {
74 | name = "PercentIOLimit"
75 | alias = "aws.efs.percentIOLimit"
76 | conversion = "max"
77 | tags = []
78 | },
79 | {
80 | name = "ClientConnections"
81 | alias = "aws.efs.clientConnections"
82 | conversion = "sum"
83 | tags = []
84 | },
85 |
86 | ]
87 | }
88 |
89 | efs-storage = {
90 | namespace = "AWS/EFS"
91 | period = 15m
92 | end-period-offset = 1
93 |
94 | dimensions = [
95 | "FileSystemId",
96 | "StorageClass"
97 | ]
98 |
99 | metrics = [
100 | {
101 | name = "StorageBytes"
102 | alias = "aws.efs.storageBytes"
103 | conversion = "sum,rate"
104 | tags = []
105 | }
106 | ]
107 | }
108 | }
109 | }
--------------------------------------------------------------------------------
/iep-archaius/src/main/scala/com/netflix/iep/archaius/PropertiesContext.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.archaius
17 |
18 | import java.util.concurrent.CountDownLatch
19 | import java.util.concurrent.atomic.AtomicLong
20 | import java.util.concurrent.atomic.AtomicReference
21 |
22 | import com.netflix.spectator.api.Functions
23 | import com.netflix.spectator.api.Registry
24 | import com.netflix.spectator.api.patterns.PolledMeter
25 | import com.typesafe.scalalogging.StrictLogging
26 |
27 | /**
 * Context for accessing property values from the storage system. This class can be
 * updated asynchronously so that local reads of properties do not require a
 * separate call to the storage layer.
31 | */
class PropertiesContext(registry: Registry) extends StrictLogging {

  private val clock = registry.clock()

  /**
   * Tracks the age for the properties cache. This can be used for a simple alert to
   * detect staleness.
   */
  private val lastUpdateTime = PolledMeter
    .using(registry)
    .withName("iep.props.cacheAge")
    .monitorValue(new AtomicLong(clock.wallTime()), Functions.AGE)

  // Latch that tests can wait on for the next update; replaced via `latch` below.
  private val updateLatch = new AtomicReference[CountDownLatch](new CountDownLatch(1))

  // Most recent property list; remains null until the first call to `update`.
  private val ref = new AtomicReference[PropList]()

  /** Update the properties cache for this context. */
  def update(props: PropList): Unit = {
    lastUpdateTime.set(clock.wallTime())
    ref.set(props)
    logger.debug(s"properties updated from dynamodb, size = ${props.size}")
    updateLatch.get().countDown()
  }

  /**
   * Returns true if properties have been updated at least once. Users of this class should
   * check that it has been properly initialized before consuming properties.
   */
  def initialized: Boolean = ref.get != null

  /** Return all properties. NOTE: returns null until `initialized` is true. */
  def getAll: PropList = ref.get

  /**
   * Return all properties for the specified cluster. NOTE: throws a
   * NullPointerException if called before `initialized` is true.
   */
  def getClusterProps(cluster: String): PropList = ref.get.filter(_.cluster == cluster)

  /** Used for testing. This returns a latch that will get updated along with properties. */
  private[archaius] def latch: CountDownLatch = {
    val latch = new CountDownLatch(1)
    updateLatch.set(latch)
    latch
  }
}
75 |
--------------------------------------------------------------------------------
/atlas-cloudwatch/src/main/resources/route53.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas {
3 | cloudwatch {
4 |
5 | // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-cloudwatch.html
6 | route53-healthcheck = {
7 | namespace = "AWS/Route53"
8 | period = 1m
9 | end-period-offset = 5
10 |
11 | dimensions = [
12 | "HealthCheckId"
13 | ]
14 |
15 | metrics = [
16 | {
17 | name = "HealthCheckPercentageHealthy"
18 | alias = "aws.route53.percentHealthy"
19 | conversion = "max"
20 | },
21 | {
22 | name = "HealthCheckStatus"
23 | alias = "aws.route53.healthCheckStatus"
24 | conversion = "max"
25 | }
26 | ]
27 | }
28 |
29 | // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-hosted-zones-with-cloudwatch.html
30 | route53-hostedzone = {
31 | namespace = "AWS/Route53"
32 | period = 1m
33 | end-period-offset = 7
34 |
35 | dimensions = [
36 | "HostedZoneId"
37 | ]
38 |
39 | metrics = [
40 | {
41 | name = "DNSQueries"
42 | alias = "aws.route53.dnsQueries"
43 | conversion = "sum,rate"
44 | }
45 | ]
46 | }
47 |
48 | // https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-hosted-zones-with-cloudwatch.html
49 | route53-resolver-inbound = {
50 | namespace = "AWS/Route53Resolver"
51 | period = 1m
52 | end-period-offset = 6
53 |
54 | // RniId or EndpointId, odd.
55 | dimensions = [
56 | "EndpointId"
57 | ]
58 |
59 | metrics = [
60 | {
61 | name = "InboundQueryVolume"
62 | alias = "aws.route53.resolverQueryVolume"
63 | conversion = "sum,rate"
64 | tags = [
65 | {
66 | key = "id"
67 | value = "inbound"
68 | }
69 | ]
70 | }
71 | ]
72 | }
73 |
74 | route53-resolver-outbound = {
75 | namespace = "AWS/Route53Resolver"
76 | period = 1m
77 | end-period-offset = 6
78 |
    // RniId or EndpointId... bizarre.
80 | dimensions = [
81 | "EndpointId"
82 | ]
83 |
84 | metrics = [
85 | {
86 | name = "OutboundQueryVolume"
87 | alias = "aws.route53.resolverQueryVolume"
88 | conversion = "sum,rate"
89 | tags = [
90 | {
91 | key = "id"
92 | value = "outbound"
93 | }
94 | ]
95 | },
96 | {
97 | name = "OutboundQueryAggregateVolume"
98 | alias = "aws.route53.resolverAggregateQueryVolume"
99 | conversion = "sum,rate"
100 | tags = [
101 | {
102 | key = "id"
103 | value = "outbound"
104 | }
105 | ]
106 | }
107 | ]
108 | }
109 | }
110 | }
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/resources/cw-fwding-cfg-schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "schema": {
3 | "$schema": "http://json-schema.org/draft-04/schema#",
4 | "title": "Cluster Config",
5 | "description": "Configuration for forwarding metrics from Atlas LWC to AWS CloudWatch",
6 | "type": "object",
7 | "required": [ "email", "expressions" ],
8 | "properties": {
9 | "email": {
10 | "description": "Email address of the service owner",
11 | "type": "string",
12 | "format": "email"
13 | },
14 | "expressions": {
15 | "description": "List of metric expressions",
16 | "type": "array",
17 | "uniqueItems": true,
18 | "items": {
19 | "type": "object",
20 | "required": [ "metricName", "atlasUri", "account" ],
21 | "properties": {
22 | "metricName": {
23 | "description": "Name of the metric that will be created in CloudWatch",
24 | "type": "string",
25 | "pattern": "^(?:(?:[\\w\\-\\.\\s]+)|(?:\\$\\([\\w\\-\\.]+\\)))+$"
26 | },
27 | "atlasUri": {
28 | "description": "Atlas query",
29 | "type": "string",
30 | "pattern": "^(https?:\/\/)?[\\w-]+(\\.[\\w-]+)*(:\\d+)?\/api\/v(\\d+){1}\/graph\\?.+$"
31 | },
32 | "comment": {
33 | "type": "string"
34 | },
35 | "dimensions": {
36 | "description": "Details about AWS CloudWatch dimension that will be created",
37 | "type": "array",
38 | "uniqueItems": true,
39 | "items": {
40 | "type": "object",
41 | "required": [ "name", "value" ],
42 | "properties": {
43 | "name": {
44 | "description": "Name of the CloudWatch dimension",
45 | "type": "string",
46 | "pattern": "^[\\w\\-\\.]+$"
47 | },
48 | "value": {
49 | "description": "Atlas tag. Example: $(nf.asg)",
50 | "type": "string",
51 | "pattern": "^(?:(?:[\\w\\-\\.]+)|(?:\\$\\([\\w\\-\\.]+\\)))+$"
52 | }
53 | }
54 | }
55 | },
56 | "account": {
57 | "description": "AWS account to send the metric to. Typically $(nf.account)",
58 | "type": "string",
59 | "pattern": "^([\\d]+|\\$\\([\\w\\-\\.]+\\))$"
60 | },
61 | "region": {
62 | "description": "AWS region to send the metric to",
63 | "type": "string",
64 | "pattern": "^([\\w\\-\\.]+|\\$\\([\\w\\-\\.]+\\))$"
65 | }
66 | }
67 | }
68 | },
69 | "checksToSkip": {
70 | "description": "The list of validations to skip",
71 | "type": "array",
72 | "uniqueItems": true,
73 | "items": {
74 | "type": "string",
75 | "minLength": 1
76 | }
77 | }
78 | }
79 | },
80 | "validationHook": "http://localhost/api/v1/cw/check/$configName"
81 | }
--------------------------------------------------------------------------------
/project/License.scala:
--------------------------------------------------------------------------------
1 | import java.io.File
2 | import java.io.PrintStream
3 | import java.time.ZonedDateTime
4 | import java.time.ZoneOffset
5 | import scala.io.Source
6 | import sbt._
7 |
8 | /**
9 | * Loosely based on: https://github.com/Banno/sbt-license-plugin
10 | *
11 | * Main changes in functionality:
12 | * - remove spurious whitespace on empty lines for license
13 | * - supports both test and main source files
14 | * - add target to check which can fail the build
15 | */
object License {
  private val lineSeparator = System.getProperty("line.separator")

  /** Current year (UTC), used in the copyright line of the expected header. */
  def year = ZonedDateTime.now(ZoneOffset.UTC).getYear

  // Expected license header text; source files must start with exactly this block.
  val apache2 = s"""
|/*
| * Copyright 2014-$year Netflix, Inc.
| *
| * Licensed under the Apache License, Version 2.0 (the "License");
| * you may not use this file except in compliance with the License.
| * You may obtain a copy of the License at
| *
| * http://www.apache.org/licenses/LICENSE-2.0
| *
| * Unless required by applicable law or agreed to in writing, software
| * distributed under the License is distributed on an "AS IS" BASIS,
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| * See the License for the specific language governing permissions and
| * limitations under the License.
| */
""".stripMargin.trim

  /** Returns all Scala and Java source files under the given directory. */
  def findFiles(dir: File): Seq[File] = {
    (dir ** "*.scala").get ++ (dir ** "*.java").get
  }

  /**
   * Fails the build if any source file under srcDir does not start with the
   * expected license header.
   */
  def checkLicenseHeaders(log: Logger, srcDir: File): Unit = {
    val badFiles = findFiles(srcDir).filterNot(checkLicenseHeader)
    if (badFiles.nonEmpty) {
      badFiles.foreach { f => log.error(s"bad license header: $f") }
      sys.error(s"${badFiles.size} files with incorrect header, run formatLicenseHeaders to fix")
    } else {
      log.info("all files have correct license header")
    }
  }

  /**
   * Reads all lines of the file as UTF-8, ensuring the underlying Source is
   * closed (Source.fromFile holds an open file handle until closed explicitly).
   */
  private def readLines(file: File): List[String] = {
    val source = Source.fromFile(file, "UTF-8")
    try source.getLines().toList
    finally source.close()
  }

  def checkLicenseHeader(file: File): Boolean = {
    checkLicenseHeader(readLines(file))
  }

  /** Returns true if everything before the `package` line matches the expected header. */
  def checkLicenseHeader(lines: List[String]): Boolean = {
    val header = lines.takeWhile(!_.startsWith("package ")).mkString(lineSeparator)
    header == apache2
  }

  /** Rewrites the header for any file under srcDir with an incorrect one. */
  def formatLicenseHeaders(log: Logger, srcDir: File): Unit = {
    findFiles(srcDir).foreach { f => formatLicenseHeader(log, f) }
  }

  /** Replaces an incorrect header for a single file, leaving correct files untouched. */
  def formatLicenseHeader(log: Logger, file: File): Unit = {
    val lines = readLines(file)
    if (!checkLicenseHeader(lines)) {
      log.info(s"fixing license header: $file")
      writeLines(file, apache2 :: removeExistingHeader(lines))
    }
  }

  /** Drops everything before the `package` line; returns the input unchanged if no package line. */
  def removeExistingHeader(lines: List[String]): List[String] = {
    val res = lines.dropWhile(!_.startsWith("package "))
    if (res.isEmpty) lines else res
  }

  /** Writes the lines as UTF-8 to match the encoding used when reading, closing the stream. */
  def writeLines(file: File, lines: List[String]): Unit = {
    val out = new PrintStream(file, "UTF-8")
    try lines.foreach(out.println) finally out.close()
  }
}
84 | }
85 |
--------------------------------------------------------------------------------
/atlas-persistence/src/test/scala/com/netflix/atlas/persistence/S3CopyUtilsSuite.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.atlas.persistence
17 |
18 | import munit.FunSuite
19 | import java.io.File
20 |
class S3CopyUtilsSuite extends FunSuite {

  // lastModified of 0 (file missing or stat failed) must not be treated as inactive.
  test("isInactive returns false if lastModified = 0") {
    val file = new File("foo") {
      override def lastModified(): Long = 0
    }
    assert(!S3CopyUtils.isInactive(file, 1000, 12345L))
  }

  test("isInactive returns true if file is old") {
    val file = new File("foo") {
      override def lastModified(): Long = 1000
    }
    assert(S3CopyUtils.isInactive(file, 500, now = 2000))
  }

  // Files currently tracked as active are never processed, regardless of age.
  test("shouldProcess returns false if file is in activeFiles") {
    val file = new File("bar")
    assert(!S3CopyUtils.shouldProcess(file, Set("bar"), 1000, _ => false))
  }

  test("shouldProcess returns true for inactive temp file") {
    val file = new File("baz.tmp") {
      override def lastModified(): Long = 1000
    }
    assert(S3CopyUtils.shouldProcess(file, Set.empty, 500, _ => true))
  }

  test("shouldProcess returns false for active temp file") {
    val file = new File("baz.tmp") {
      override def lastModified(): Long = 0
    }
    // not inactive yet
    assert(!S3CopyUtils.shouldProcess(file, Set.empty, 500, _ => true))
  }

  // Non-temp files are processed even when the inactivity predicate is false.
  test("shouldProcess returns true for non-temp file") {
    val file = new File("baz.data")
    assert(S3CopyUtils.shouldProcess(file, Set.empty, 500, _ => false))
  }

  // Key layout: <prefix>/<hour-part>/.../<rest-of-file-name>.
  test("buildS3Key produces expected key structure") {
    val fileName = "2020-05-10T0300.i-localhost.1.XkvU3A.1200-1320"
    val key = S3CopyUtils.buildS3Key(fileName, "atlas", 15)
    assert(key.contains("atlas/2020-05-10T0300"))
    assert(key.endsWith("/i-localhost.1.XkvU3A.1200-1320"))
  }

  // The hash prefix spreads keys across S3 partitions; original path is retained.
  test("hash returns 3-char hex prefix and original path") {
    val path = "atlas/2020-01-01T0000"
    val hashed = S3CopyUtils.hash(path)
    val parts = hashed.split("/", 2)
    assert(parts(0).length == 3)
    assert(parts(1).startsWith("atlas/"))
  }

  // Temp files map to the sentinel "61-61"; range suffixes map to minute-of-hour ranges.
  test("extractMinuteRange handles tmp and range files") {
    assertEquals(S3CopySink.extractMinuteRange("abc.tmp"), "61-61")
    assertEquals(S3CopySink.extractMinuteRange("abc.1200-1300"), "20-21")
    assertEquals(S3CopySink.extractMinuteRange("abc.0000-0123"), "00-02")
  }
}
83 |
--------------------------------------------------------------------------------
/atlas-aggregator/src/main/resources/application.conf:
--------------------------------------------------------------------------------
1 |
2 | atlas.pekko {
3 | api-endpoints = [
4 | "com.netflix.atlas.pekko.ConfigApi",
5 | "com.netflix.atlas.pekko.HealthcheckApi",
6 | "com.netflix.atlas.aggregator.UpdateApi"
7 | ]
8 | }
9 |
10 | atlas.aggregator {
11 |
12 | cache {
13 | strings {
14 | max-size = 2000000
15 | }
16 | }
17 |
18 | rollup-policy = [
19 | {
20 | query = "percentile,:has"
21 | rollup = ["nf.node", "nf.task"]
22 | }
23 | ]
24 |
25 | publisher {
26 | queue-size = 10000
27 | }
28 |
29 | shards {
30 | // Ratio of traffic to send to report to shards instead of locally. Zero indicates
31 | // all traffic should go locally. 1 indicates all should go to the set of remote shards.
    // This setting is mainly used to transition between a single aggregator cluster and a
33 | // set of shards.
34 | traffic-ratio = 0.0
35 |
36 | // How many shards are there? Set to 0 to disable.
37 | count = 0
38 |
    // Pattern for shard URIs. The number for the shard, [0, count), will be substituted
40 | // into the pattern. Use java format string accepting an integer.
41 | uri-pattern = ""
42 |
43 | // Size of the queue for each shard
44 | queue-size = 50000
45 |
46 | // Batch size for requests to publish to a shard
47 | batch-size = 5000
48 | }
49 |
50 | // Should the aggregation of gauges be delayed until the final eval step?
51 | delay-gauge-aggregation = true
52 |
53 | allowed-characters = "-._A-Za-z0-9^~"
54 |
55 | validation {
56 |
57 | // Maximum number of user tags
58 | max-user-tags = 20
59 |
60 | // Validation rules for tags, should only include simple TagRule instances
61 | rules = [
62 | {
63 | class = "com.netflix.atlas.core.validation.KeyLengthRule"
64 | min-length = 2
65 | max-length = 60
66 | },
67 | {
68 | class = "com.netflix.atlas.core.validation.NameValueLengthRule"
69 | name {
70 | min-length = 2
71 | max-length = 255
72 | }
73 | others {
74 | min-length = 1
75 | max-length = 120
76 | }
77 | },
78 | {
79 | class = "com.netflix.atlas.core.validation.ReservedKeyRule"
80 | prefix = "atlas."
81 | allowed-keys = [
82 | "aggr",
83 | "dstype",
84 | "offset",
85 | "legacy"
86 | ]
87 | },
88 | {
89 | class = "com.netflix.atlas.core.validation.ReservedKeyRule"
90 | prefix = "nf."
91 | allowed-keys = [
92 | "account",
93 | "ami",
94 | "app",
95 | "asg",
96 | "cluster",
97 | "container",
98 | "country",
99 | "country.rollup",
100 | "job",
101 | "node",
102 | "process",
103 | "region",
104 | "shard1",
105 | "shard2",
106 | "stack",
107 | "subnet",
108 | "task",
109 | "vmtype",
110 | "vpc",
111 | "zone"
112 | ]
113 | }
114 | ]
115 | }
116 | }
--------------------------------------------------------------------------------
/iep-lwc-fwding-admin/src/main/scala/com/netflix/iep/lwc/fwd/admin/AppConfiguration.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2014-2025 Netflix, Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package com.netflix.iep.lwc.fwd.admin
17 |
18 | import org.apache.pekko.actor.ActorSystem
19 | import com.netflix.atlas.eval.stream.Evaluator
20 | import com.netflix.iep.aws2.AwsClientFactory
21 | import com.netflix.spectator.api.NoopRegistry
22 | import com.netflix.spectator.api.Registry
23 | import com.typesafe.config.Config
24 | import com.typesafe.config.ConfigFactory
25 | import org.springframework.context.annotation.Bean
26 | import org.springframework.context.annotation.Configuration
27 | import software.amazon.awssdk.services.dynamodb.DynamoDbClient
28 |
29 | import java.util.Optional
30 |
31 | @Configuration
32 | class AppConfiguration {
33 |
34 |   // Use the injected Config when present, otherwise load the default application config.
35 |   private def resolvedConfig(config: Optional[Config]): Config =
36 |     config.orElseGet(() => ConfigFactory.load())
37 |
38 |   // Use the injected Registry when present, otherwise fall back to a no-op registry.
39 |   private def resolvedRegistry(registry: Optional[Registry]): Registry =
40 |     registry.orElseGet(() => new NoopRegistry)
41 |
42 |   // DynamoDB client obtained from the shared AWS client factory.
43 |   @Bean
44 |   def awsDynamoDBClient(factory: AwsClientFactory): DynamoDbClient =
45 |     factory.getInstance(classOf[DynamoDbClient])
46 |
47 |   // ScalingPoliciesDao wired from the config and the actor system.
48 |   @Bean
49 |   def scalingPoliciesDao(
50 |     config: Optional[Config],
51 |     system: ActorSystem
52 |   ): ScalingPoliciesDao =
53 |     new ScalingPoliciesDaoImpl(resolvedConfig(config), system)
54 |
55 |   // ExpressionDetailsDao wired from the config, the DynamoDB client and a registry.
56 |   @Bean
57 |   def expressionDetailsDao(
58 |     config: Optional[Config],
59 |     ddbClient: DynamoDbClient,
60 |     registry: Optional[Registry]
61 |   ): ExpressionDetailsDao =
62 |     new ExpressionDetailsDaoImpl(resolvedConfig(config), ddbClient, resolvedRegistry(registry))
63 |
64 |   // Purger wired from the config, the expression details DAO and the actor system.
65 |   @Bean
66 |   def purger(
67 |     config: Optional[Config],
68 |     expressionDetailsDao: ExpressionDetailsDao,
69 |     system: ActorSystem
70 |   ): Purger =
71 |     new PurgerImpl(resolvedConfig(config), expressionDetailsDao, system)
72 |
73 |   // MarkerService wired from the config, a registry, the DAO and the actor system.
74 |   @Bean
75 |   def markerService(
76 |     config: Optional[Config],
77 |     registry: Optional[Registry],
78 |     expressionDetailsDao: ExpressionDetailsDao,
79 |     system: ActorSystem
80 |   ): MarkerService = {
81 |     new MarkerServiceImpl(resolvedConfig(config), resolvedRegistry(registry), expressionDetailsDao, system)
82 |   }
83 |
84 |   // Schema validation bean; takes no dependencies.
85 |   @Bean
86 |   def schemaValidation(): SchemaValidation = new SchemaValidation
87 |
88 |   // Expression interpreter; requires an explicit Config bean (no Optional fallback here).
89 |   @Bean
90 |   def exprInterpreter(config: Config): ExprInterpreter = new ExprInterpreter(config)
91 |
92 |   // Expression validations built from the interpreter and evaluator beans.
93 |   @Bean
94 |   def cwExprValidations(interpreter: ExprInterpreter, evaluator: Evaluator): CwExprValidations =
95 |     new CwExprValidations(interpreter, evaluator)
96 | }
96 |
--------------------------------------------------------------------------------