├── assets ├── GrafanaDashboard.png ├── DeviceSimulatorSampleProducer.svg └── OverallArchitecture.svg ├── analytics-kotlin ├── gradle │ └── wrapper │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties ├── settings.gradle ├── src │ └── main │ │ └── kotlin │ │ └── services │ │ ├── timestream │ │ ├── TimestreamPoint.kt │ │ ├── TimestreamInitializer.kt │ │ └── TimestreamSink.kt │ │ └── kinesisanalytics │ │ ├── utils │ │ └── ParameterToolUtils.kt │ │ ├── operators │ │ ├── OffsetFutureTimestreamPoints.kt │ │ └── JsonToTimestreamPayloadFn.kt │ │ └── StreamingJob.kt ├── build.gradle.kts ├── README.md ├── gradlew.bat └── gradlew ├── CODE_OF_CONDUCT.md ├── cdk ├── requirements.txt ├── cdk.json ├── stacks │ ├── grafana │ │ ├── datasource.json │ │ ├── grafana_stack.py │ │ └── dashboard.json │ ├── kinesis │ │ ├── amazon_kinesis_stream_stack.py │ │ ├── amazon_kinesis_analytics_source_stack.py │ │ └── amazon_kinesis_analytics_stack.py │ ├── sample_kinesis_stream_producer │ │ ├── sample_kinesis_stream_producer_stack.py │ │ ├── producer_lambda │ │ │ ├── app.py │ │ │ └── config.py │ │ └── README.md │ └── amazon_timestream_stack.py ├── setup.py ├── README.md └── app.py ├── .gitignore ├── LICENSE ├── analytics ├── src │ └── main │ │ └── java │ │ └── com │ │ └── amazonaws │ │ └── services │ │ ├── kinesisanalytics │ │ ├── utils │ │ │ └── ParameterToolUtils.java │ │ ├── operators │ │ │ ├── OffsetFutureTimestreamPoints.java │ │ │ └── JsonToTimestreamPayloadFn.java │ │ └── StreamingJob.java │ │ └── timestream │ │ ├── TimestreamInitializer.java │ │ ├── TimestreamPoint.java │ │ └── TimestreamSink.java ├── README.md └── pom.xml ├── setup.sh ├── redeploy-kda-app.sh ├── CONTRIBUTING.md └── README.md /assets/GrafanaDashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/amazon-kinesis-timestream-grafana/HEAD/assets/GrafanaDashboard.png -------------------------------------------------------------------------------- /analytics-kotlin/gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/amazon-kinesis-timestream-grafana/HEAD/analytics-kotlin/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /analytics-kotlin/settings.gradle: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | rootProject.name = 'analytics-timestream-kotlin-sample' 5 | 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /analytics-kotlin/gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: MIT-0 3 | distributionBase=GRADLE_USER_HOME 4 | distributionPath=wrapper/dists 5 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.1-bin.zip 6 | zipStoreBase=GRADLE_USER_HOME 7 | zipStorePath=wrapper/dists 8 | -------------------------------------------------------------------------------- /cdk/requirements.txt: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | 4 | -e . 5 | aws-cdk.aws-timestream 6 | aws-cdk.aws_kinesis 7 | aws-cdk.core 8 | aws_cdk.aws_events 9 | aws-cdk.aws-events-targets 10 | aws_cdk.aws_kinesisanalytics 11 | aws_cdk.aws_kinesis 12 | aws_cdk.aws_s3 13 | aws_cdk.aws_iam 14 | 15 | aws-cdk-aws_lambda_python 16 | aws-cdk.aws-ecs-patterns -------------------------------------------------------------------------------- /cdk/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "_comment": "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0", 3 | "app": "python3 app.py", 4 | "requireApproval": "never", 5 | "context": { 6 | "@aws-cdk/core:enableStackNameDuplicates": "true", 7 | "aws-cdk:enableDiffNoFail": "true", 8 | "@aws-cdk/core:stackRelativeExports": "true", 9 | "@aws-cdk/aws-ecr-assets:dockerIgnoreSupport": true 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /cdk/stacks/grafana/datasource.json: -------------------------------------------------------------------------------- 1 | { 2 | "_comment": "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0", 3 | "name": "Amazon Timestream", 4 | "type": "grafana-timestream-datasource", 5 | "access": "proxy", 6 | "isDefault": true, 7 | "jsonData": { 8 | "defaultDatabase": "TimestreamDB", 9 | "defaultTable": "SampleMetricsTable", 10 | "defaultMeasure": "voltage_measure" 11 | } 12 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | 4 | # Python 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | cdk.egg-info/ 10 | 11 | # Environments 12 | .env 13 | .venv 14 | env/ 15 | venv/ 16 | ENV/ 17 | env.bak/ 18 | venv.bak/ 19 | 20 | # Java and Kotlin 21 | # Package Files # 22 | *.jar 23 | *.war 24 | *.nar 25 | *.ear 26 | *.zip 27 | *.tar.gz 28 | *.rar 29 | 30 | target 31 | build 32 | dependency-reduced-pom.xml 33 | 34 | # Gradle 35 | .gradle/ 36 | 37 | # CDK Context & Staging files 38 | .cdk.staging/ 39 | cdk.out/ 40 | 41 | # SAM Context & Staging files 42 | .aws-sam 43 | samconfig.toml 44 | 45 | # VS Code 46 | .vscode 47 | .ropeproject 48 | 49 | # IntelliJ 50 | /.idea/ 51 | *.iml 52 | 53 | .DS_Store 54 | /acat* 55 | -------------------------------------------------------------------------------- /analytics-kotlin/src/main/kotlin/services/timestream/TimestreamPoint.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package services.timestream 5 | 6 | import com.amazonaws.services.timestreamwrite.model.MeasureValueType 7 | 8 | data class TimestreamPoint( 9 | var measureName: String? = null, 10 | var measureValueType: MeasureValueType? = null, 11 | var measureValue: String? = null, 12 | var time: Long = 0, 13 | var timeUnit: String? = null, 14 | private var dimensions: MutableMap = HashMap() 15 | ) { 16 | 17 | fun getDimensions(): Map { 18 | return dimensions.toMap() 19 | } 20 | 21 | fun addDimension(dimensionName: String, dimensionValue: String) { 22 | dimensions[dimensionName] = dimensionValue 23 | } 24 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /cdk/stacks/kinesis/amazon_kinesis_stream_stack.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | # 4 | # Licensed under the MIT-0 License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | 8 | from aws_cdk import ( 9 | aws_kinesis as kds, 10 | core 11 | ) 12 | from aws_cdk.core import Duration 13 | 14 | 15 | class KinesisStreamStack(core.Stack): 16 | 17 | def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: 18 | super().__init__(scope, construct_id, **kwargs) 19 | 20 | stream = kds.Stream(self, "InputStream", 21 | shard_count=1, 22 | retention_period=Duration.hours(24) 23 | ) 24 | 25 | self._stream = stream 26 | 27 | @property 28 | def stream(self) -> kds.IStream: 29 | return self._stream 30 | -------------------------------------------------------------------------------- /cdk/setup.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: MIT-0 3 | 4 | import setuptools 5 | 6 | with open("../README.md") as fp: 7 | long_description = fp.read() 8 | 9 | setuptools.setup( 10 | name="cdk", 11 | version="0.0.1", 12 | 13 | description="CDK stack to create near real time event processing and visualizations", 14 | long_description=long_description, 15 | long_description_content_type="text/markdown", 16 | 17 | author="Sascha Janssen and John Mousa", 18 | 19 | package_dir={"": "stacks"}, 20 | packages=setuptools.find_packages(where="stacks"), 21 | 22 | install_requires=[ 23 | "aws-cdk.core==1.*", 24 | ], 25 | 26 | python_requires=">=3.6", 27 | 28 | classifiers=[ 29 | "Development Status :: 4 - Beta", 30 | 31 | "Intended Audience :: Developers", 32 | 33 | "License :: OSI Approved :: Apache Software License", 34 | 35 | "Programming Language :: JavaScript", 36 | "Programming Language :: Python :: 3 :: Only", 37 | "Programming Language :: Python :: 3.6", 38 | "Programming Language :: Python :: 3.7", 39 | "Programming Language :: Python :: 3.8", 40 | 41 | "Topic :: Software Development :: Code Generators", 42 | "Topic :: Utilities", 43 | 44 | "Typing :: Typed", 45 | ], 46 | ) 47 | -------------------------------------------------------------------------------- /analytics-kotlin/src/main/kotlin/services/kinesisanalytics/utils/ParameterToolUtils.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package services.kinesisanalytics.utils 5 | 6 | import com.amazonaws.services.kinesisanalytics.runtime.KinesisAnalyticsRuntime 7 | import org.apache.flink.api.java.utils.ParameterTool 8 | import java.util.* 9 | import kotlin.collections.set 10 | 11 | 12 | object ParameterToolUtils { 13 | 14 | private fun Properties.asParameterTool(): ParameterTool { 15 | val map: MutableMap = HashMap(this.size) 16 | this.forEach { k: Any?, v: Any? -> map[k as String?] = v as String? } 17 | return ParameterTool.fromMap(map) 18 | } 19 | 20 | fun fromArgsAndApplicationProperties(args: Array?): ParameterTool { 21 | //read parameters from command line arguments (for debugging) 22 | var parameter = ParameterTool.fromArgs(args) 23 | 24 | //read the parameters from the Kinesis Analytics environment 25 | val applicationProperties = KinesisAnalyticsRuntime.getApplicationProperties() 26 | val flinkProperties = applicationProperties["FlinkApplicationProperties"] 27 | if (flinkProperties != null) { 28 | parameter = parameter.mergeWith(flinkProperties.asParameterTool()) 29 | } 30 | return parameter 31 | } 32 | } -------------------------------------------------------------------------------- /analytics-kotlin/src/main/kotlin/services/kinesisanalytics/operators/OffsetFutureTimestreamPoints.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | package services.kinesisanalytics.operators 4 | 5 | import org.apache.flink.streaming.api.functions.ProcessFunction 6 | import org.apache.flink.util.Collector 7 | import services.timestream.TimestreamPoint 8 | import java.util.concurrent.TimeUnit 9 | 10 | class OffsetFutureTimestreamPoints : ProcessFunction, Collection>() { 11 | companion object { 12 | private val TIMESTREAM_FUTURE_THRESHOLD = TimeUnit.MINUTES.toMillis(15) 13 | } 14 | 15 | @Override 16 | override fun processElement( 17 | points: Collection, ctx: Context, 18 | out: Collector> 19 | ) { 20 | points.asSequence() 21 | .filter { pointTimestamp(it) > System.currentTimeMillis() + TIMESTREAM_FUTURE_THRESHOLD } 22 | .forEach { 23 | it.time = ctx.timestamp() 24 | it.timeUnit = TimeUnit.MILLISECONDS.name 25 | } 26 | out.collect(points) 27 | } 28 | 29 | private fun pointTimestamp(p: TimestreamPoint) = 30 | TimeUnit.valueOf(p.timeUnit 31 | .takeIf { it == "MILLISECONDS" || it == "SECONDS" || it == "MICROSECONDS" || it == "NANOSECONDS" } 32 | ?: TimeUnit.MILLISECONDS.name) 33 | .toMillis(p.time) 34 | } 35 | -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/kinesisanalytics/utils/ParameterToolUtils.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package com.amazonaws.services.kinesisanalytics.utils; 5 | 6 | import com.amazonaws.services.kinesisanalytics.runtime.KinesisAnalyticsRuntime; 7 | import org.apache.flink.api.java.utils.ParameterTool; 8 | 9 | import java.io.IOException; 10 | import java.util.HashMap; 11 | import java.util.Map; 12 | import java.util.Properties; 13 | 14 | public class ParameterToolUtils { 15 | public static ParameterTool fromApplicationProperties(Properties properties) { 16 | Map map = new HashMap<>(properties.size()); 17 | 18 | properties.forEach((k, v) -> map.put((String) k, (String) v)); 19 | 20 | return ParameterTool.fromMap(map); 21 | } 22 | 23 | public static ParameterTool fromArgsAndApplicationProperties(String[] args) throws IOException { 24 | //read parameters from command line arguments (for debugging) 25 | ParameterTool parameter = ParameterTool.fromArgs(args); 26 | 27 | //read the parameters from the Kinesis Analytics environment 28 | Map applicationProperties = KinesisAnalyticsRuntime.getApplicationProperties(); 29 | 30 | Properties flinkProperties = applicationProperties.get("FlinkApplicationProperties"); 31 | 32 | if (flinkProperties != null) { 33 | parameter = parameter.mergeWith(ParameterToolUtils.fromApplicationProperties(flinkProperties)); 34 | } 35 | 36 | return parameter; 37 | } 38 | } -------------------------------------------------------------------------------- /cdk/stacks/sample_kinesis_stream_producer/sample_kinesis_stream_producer_stack.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: MIT-0 3 | 4 | from aws_cdk import ( 5 | aws_events_targets, 6 | aws_kinesis, 7 | aws_lambda, 8 | aws_lambda_python, 9 | core 10 | ) 11 | from aws_cdk.aws_events import Rule, Schedule, RuleTargetInput 12 | from aws_cdk.core import Duration 13 | 14 | 15 | class SampleKinesisStreamProducerStack(core.Stack): 16 | 17 | def __init__(self, scope: core.Construct, construct_id: str, stream: aws_kinesis.IStream, **kwargs) -> None: 18 | super().__init__(scope, construct_id, **kwargs) 19 | 20 | sample_device_producer = aws_lambda_python.PythonFunction(self, 'SampleDeviceProducer', 21 | entry='stacks/sample_kinesis_stream_producer/producer_lambda', 22 | index='app.py', 23 | runtime=aws_lambda.Runtime.PYTHON_3_8, 24 | timeout=core.Duration.seconds(30)) 25 | 26 | stream.grant_write(sample_device_producer) 27 | 28 | lambda_input = {"Stream": stream.stream_name} 29 | Rule(self, 'ProducerTriggerEventRule', 30 | enabled=True, 31 | schedule=Schedule.rate(Duration.minutes(1)), 32 | targets=[aws_events_targets.LambdaFunction(handler=sample_device_producer, 33 | event=RuleTargetInput.from_object(lambda_input))]) 34 | -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/kinesisanalytics/operators/OffsetFutureTimestreamPoints.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | package com.amazonaws.services.kinesisanalytics.operators; 4 | 5 | import com.amazonaws.services.timestream.TimestreamPoint; 6 | import org.apache.flink.streaming.api.functions.ProcessFunction; 7 | import org.apache.flink.util.Collector; 8 | 9 | import java.util.Collection; 10 | import java.util.concurrent.TimeUnit; 11 | 12 | public class OffsetFutureTimestreamPoints 13 | extends ProcessFunction, Collection> { 14 | 15 | private static final long TIMESTREAM_FUTURE_THRESHOLD = TimeUnit.MINUTES.toMillis(15); 16 | 17 | @Override 18 | public void processElement(Collection timestreamPoints, Context context, 19 | Collector> collector) { 20 | 21 | timestreamPoints.stream() 22 | .filter(p -> pointTimestamp(p) > System.currentTimeMillis() + TIMESTREAM_FUTURE_THRESHOLD) 23 | .forEach(p -> { 24 | p.setTime(context.timestamp()); 25 | p.setTimeUnit(TimeUnit.MILLISECONDS.name()); 26 | }); 27 | collector.collect(timestreamPoints); 28 | } 29 | 30 | private long pointTimestamp(TimestreamPoint point) { 31 | String timeUnit = TimeUnit.MILLISECONDS.name(); 32 | if ("MILLISECONDS".equals(point.getTimeUnit()) 33 | || "SECONDS".equals(point.getTimeUnit()) 34 | || "MICROSECONDS".equals(point.getTimeUnit()) 35 | || "NANOSECONDS".equals(point.getTimeUnit())) { 36 | timeUnit = point.getTimeUnit(); 37 | } 38 | return TimeUnit.valueOf(timeUnit).toMillis(point.getTime()); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /analytics-kotlin/src/main/kotlin/services/kinesisanalytics/operators/JsonToTimestreamPayloadFn.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package services.kinesisanalytics.operators 5 | 6 | import com.amazonaws.services.timestreamwrite.model.MeasureValueType 7 | import com.google.common.reflect.TypeToken 8 | import com.google.gson.Gson 9 | import org.apache.flink.api.common.functions.RichMapFunction 10 | import org.slf4j.LoggerFactory 11 | import services.timestream.TimestreamPoint 12 | import java.util.* 13 | 14 | class JsonToTimestreamPayloadFn : RichMapFunction>() { 15 | 16 | companion object { 17 | private val LOG = LoggerFactory.getLogger(JsonToTimestreamPayloadFn::class.java) 18 | } 19 | 20 | @Override 21 | @Throws(Exception::class) 22 | override fun map(jsonString: String): List { 23 | val map = Gson().fromJson>( 24 | jsonString, 25 | object : TypeToken>() {}.type 26 | ) 27 | val basePoint = TimestreamPoint() 28 | val measures = HashMap(map.size) 29 | 30 | for ((key, value) in map) { 31 | if (key.lowercase(Locale.ENGLISH).endsWith("_measure")) { 32 | measures[key] = value 33 | continue 34 | } 35 | 36 | when (key.lowercase(Locale.ENGLISH)) { 37 | "time" -> basePoint.time = value.toLong() 38 | "timeunit" -> basePoint.timeUnit = value 39 | else -> basePoint.addDimension(key, value) 40 | } 41 | } 42 | LOG.trace("mapped to point {}", basePoint) 43 | 44 | return measures.entries.asSequence() 45 | .map { 46 | basePoint.copy( 47 | measureName = it.key, measureValue = it.value, 48 | measureValueType = MeasureValueType.DOUBLE 49 | ) 50 | } 51 | .toList() 52 | } 53 | 54 | } -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # 4 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
5 | # SPDX-License-Identifier: MIT-0 6 | 7 | # Run Amazon Kinesis Data Analytics app 8 | nohup aws kinesisanalyticsv2 start-application --application-name amazon-kinesis-analytics \ 9 | --run-configuration '{ "ApplicationRestoreConfiguration": { "ApplicationRestoreType": "SKIP_RESTORE_FROM_SNAPSHOT" } }' \ 10 | &>/dev/null & 11 | echo "Started analytics app" 12 | printf "\n" 13 | 14 | # Get grafana URL and credentials 15 | grafana_url=$( 16 | aws cloudformation describe-stacks --stack-name grafana \ 17 | --query "Stacks[0].Outputs[?starts_with(OutputKey, 'MyFargateServiceServiceURL')].OutputValue" --output text 18 | ) 19 | 20 | grafana_secret_name=$( 21 | aws cloudformation describe-stacks --stack-name grafana \ 22 | --query "Stacks[0].Outputs[?OutputKey=='GrafanaAdminSecret'].OutputValue" --output text 23 | ) 24 | grafana_secret_name_no_suffix=${grafana_secret_name%-*} 25 | grafana_admin_password=$( 26 | aws secretsmanager get-secret-value --secret-id ${grafana_secret_name_no_suffix} \ 27 | --query "SecretString" --output text 28 | ) 29 | 30 | # Create grafana API token 31 | grafana_token=$( 32 | curl -X POST -u "admin:${grafana_admin_password}" \ 33 | -H "Content-Type: application/json" \ 34 | -d '{"name":"apikeycurl", "role": "Admin"}' \ 35 | "${grafana_url}"/api/auth/keys | 36 | jq -r .key 37 | ) 38 | echo "Created Grafana API token ${grafana_token}" 39 | printf "\n" 40 | 41 | # Use grafana API to create data source and dashboard 42 | curl -X POST -k \ 43 | -H "Content-Type: application/json" -H "Authorization: Bearer ${grafana_token}" \ 44 | -d @./cdk/stacks/grafana/datasource.json \ 45 | "${grafana_url}"/api/datasources 46 | printf "\n" 47 | 48 | curl -X POST -k \ 49 | -H "Content-Type: application/json" -H "Authorization: Bearer ${grafana_token}" \ 50 | -d @./cdk/stacks/grafana/dashboard.json \ 51 | "${grafana_url}"/api/dashboards/db 52 | printf "\n" 53 | echo "Now you can head to ${grafana_url} to check newly created dashboard." 54 | echo "Using credentials admin:${grafana_admin_password}" 55 | printf "\n" 56 | -------------------------------------------------------------------------------- /cdk/stacks/amazon_timestream_stack.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: MIT-0 3 | 4 | from aws_cdk import ( 5 | aws_timestream as timestream, 6 | core 7 | ) 8 | 9 | 10 | class AmazonTimeStreamStack(core.Stack): 11 | 12 | def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: 13 | super().__init__(scope, construct_id, **kwargs) 14 | 15 | memory_retention_param = core.CfnParameter(self, "memoryRetentionParam", type="Number", 16 | min_value=1, max_value=8766, default=6, 17 | description="The duration (in hours) for which data must be retained " 18 | "in the memory store per table.") 19 | 20 | magnetic_retention_param = core.CfnParameter(self, "magneticRetentionParam", type="Number", 21 | min_value=1, max_value=73000, default=15, 22 | description="The duration (in days) for which data must be retained " 23 | "in the magnetic store per table.") 24 | 25 | database = timestream.CfnDatabase(self, id="TimestreamDatabase", database_name="TimestreamDB") 26 | 27 | retention = { 28 | "MemoryStoreRetentionPeriodInHours": memory_retention_param.value_as_number, 29 | "MagneticStoreRetentionPeriodInDays": magnetic_retention_param.value_as_number 30 | } 31 | 32 | table = timestream.CfnTable(self, "SampleMetricsTable", database_name=database.database_name, 33 | retention_properties=retention, 34 | table_name="SampleMetricsTable") 35 | table.add_depends_on(database) 36 | 37 | self._database = database 38 | self._table = table 39 | 40 | @property 41 | def database(self) -> timestream.CfnDatabase: 42 | return self._database 43 | 44 | @property 45 | def table(self) -> timestream.CfnTable: 46 | return self._table 47 | -------------------------------------------------------------------------------- /cdk/stacks/sample_kinesis_stream_producer/producer_lambda/app.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: MIT-0 3 | 4 | import json 5 | import random 6 | import time 7 | 8 | import boto3 9 | from config import device_ids, measures, iterations, chance_of_anomaly 10 | 11 | kinesis = boto3.client('kinesis') 12 | 13 | 14 | def handler(event, context): 15 | sent = 0 16 | for x in range(iterations): 17 | records = [] 18 | for device_id in device_ids: 19 | records.append(prepare_record(measures, device_id, 20 | (iterations - x) / iterations * 60)) 21 | 22 | print("records {}".format(len(records))) 23 | write_records(event["Stream"], records) 24 | sent = sent + len(records) 25 | return {"records": sent} 26 | 27 | 28 | def prepare_record(some_measures, device_id, delta_seconds): 29 | current_time = int((time.time() - delta_seconds) * 1000) 30 | record = { 31 | 'Time': current_time, 32 | 'DeviceID': device_id, 33 | } 34 | 35 | for measure_field in some_measures: 36 | measure_value = random.uniform(measure_field['start'], measure_field['end']) 37 | if random.random() < chance_of_anomaly: 38 | if random.random() > 0.5: 39 | measure_value = measure_value + measure_field['end'] 40 | else: 41 | measure_value = measure_value - measure_field['start'] 42 | record[measure_field['measure'] + '_measure'] = measure_value 43 | 44 | return record 45 | 46 | 47 | def write_records(stream_name, records): 48 | kinesis_records = [] 49 | for record in records: 50 | kinesis_records.append({ 51 | 'Data': json.dumps(record), 52 | 'PartitionKey': record["DeviceID"] 53 | }) 54 | 55 | result = None 56 | try: 57 | result = kinesis.put_records( 58 | StreamName=stream_name, 59 | Records=kinesis_records, ) 60 | 61 | status = result['ResponseMetadata']['HTTPStatusCode'] 62 | print("Processed %d records. WriteRecords Status: %s" % 63 | (len(records), status)) 64 | except Exception as err: 65 | print("Error:", err) 66 | if result is not None: 67 | print("Result:{}".format(result)) 68 | -------------------------------------------------------------------------------- /cdk/stacks/sample_kinesis_stream_producer/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Sample Producer Lambda 4 | 5 | ## Configuration 6 | 7 | The lambda function will read configuration defined in `config.py` and interpret it as following 8 | 9 | |Configuration|Description| 10 | |:------------|:----------| 11 | |device_ids|List of devices that will produce reading records for| 12 | |measures|List of measures to produce values for, every measure would nead to provide the following
- measure: the measure name, for example humidity, voltage, etc.
- start and end: 2 double values that the function will use to generate a uniformly distributed random value in this range. All measurements will contain only double values for this sample| 13 | |iterations| Number of times to repeat the process of generating values for every device whenever the function is called| 14 | |chance_of_anomaly| Chance that the producer will introduce a data point outside the range of start and end for a measure. The chance is uniform across all devices and iterations| 15 | 16 | ## Execution 17 | 18 | The main producer function expects a `JSON` payload that contains the name of the Amazon Kinesis data stream to send events to. 19 | 20 | ```json 21 | { 22 | "Stream": "TestKinesisDataStreamName" 23 | } 24 | ``` 25 | 26 | Once it runs, it picks up the measurement configurations and constructs events to be sent to the data stream as 27 | follows. 28 | 29 | 1. Loops over device IDs and constructs a record skeleton 30 | ```json 31 | { 32 | "Time": "current_time", 33 | "DeviceID": "device_id" 34 | } 35 | ``` 36 | 2. Loops over measures 37 | 3. Adds fields to the record as follows 38 | ```json 39 | { 40 | "measures.measure + _measure": "random(measures.measure.start, measures.measure.end)" 41 | } 42 | ``` 43 | 44 | This produces records similar to the example below and adds them to the Kinesis data stream. 45 | 46 | ```json 47 | { 48 | "Time": 1609757175225.499, 49 | "DeviceID": "1aeb6e58-9d5b-4fd6-a5c3-6f7dd09a150d", 50 | "temperature_measure": 15.5, 51 | "humidity_measure": 70.3, 52 | "voltage_measure": 39.7, 53 | "watt_measure": 301.4 54 | } 55 | ``` 56 | 57 | ## Schedule 58 | 59 | The lambda function is scheduled to be called by an Amazon EventBridge rule invoked at a 1-minute rate. -------------------------------------------------------------------------------- /cdk/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Welcome to your CDK Python project! 4 | 5 | This is a blank project for Python development with CDK. 6 | 7 | The `cdk.json` file tells the CDK Toolkit how to execute your app. 8 | 9 | This project is set up like a standard Python project. The initialization process also creates a virtualenv within this 10 | project, stored under the `.venv` 11 | directory. To create the virtualenv it assumes that there is a `python3` 12 | (or `python` for Windows) executable in your path with access to the `venv` 13 | package. If for any reason the automatic creation of the virtualenv fails, you can create the virtualenv manually. 14 | 15 | To manually create a virtualenv on MacOS and Linux: 16 | 17 | ```shell 18 | $ python3 -m venv .venv 19 | ``` 20 | 21 | After the init process completes and the virtualenv is created, you can use the following step to activate your 22 | virtualenv. 23 | 24 | ```shell 25 | $ source .venv/bin/activate 26 | ``` 27 | 28 | If you are on a Windows platform, you would activate the virtualenv like this: 29 | 30 | ```shell 31 | % .venv\Scripts\activate.bat 32 | ``` 33 | 34 | Once the virtualenv is activated, you can install the required dependencies. 35 | 36 | ```shell 37 | $ pip install -r requirements.txt 38 | ``` 39 | 40 | At this point you can synthesize the CloudFormation template for this code. 41 | 42 | ```shell 43 | $ cdk synth 44 | ``` 45 | 46 | To add additional dependencies, for example other CDK libraries, just add them to your `setup.py` file and rerun 47 | the `pip install -r requirements.txt` 48 | command.
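For example, a new dependency goes into the `install_requires` list in `setup.py` — a minimal sketch, assuming you wanted an extra construct library (`aws-cdk.aws-s3` here is only a stand-in for whatever module you actually need):

```python
# setup.py (excerpt) -- illustrative only; keep the rest of the file as in this repository
import setuptools

setuptools.setup(
    name="cdk",
    version="0.0.1",
    package_dir={"": "stacks"},
    packages=setuptools.find_packages(where="stacks"),
    install_requires=[
        "aws-cdk.core==1.*",
        # hypothetical extra CDK construct library, added for illustration
        "aws-cdk.aws-s3==1.*",
    ],
    python_requires=">=3.6",
)
```

Because `requirements.txt` installs this package in editable mode (`-e .`), rerunning `pip install -r requirements.txt` picks up the new entry.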
49 | 50 | ## Useful commands 51 | 52 | * `cdk ls` list all stacks in the app 53 | * `cdk synth` emits the synthesized CloudFormation template 54 | * `cdk deploy` deploy this stack to your default AWS account/region 55 | * `cdk diff` compare deployed stack with current state 56 | * `cdk destroy` Destroy the stacks already created 57 | * `cdk docs` open CDK documentation 58 | 59 | ## FAQ 60 | Q: When running `cdk deploy --all` I see the following error: 61 | ``` 62 | Error [ValidationError]: Template format error: Unrecognized resource types: [AWS::Timestream::Table, AWS::Timestream::Database] 63 | ``` 64 | 65 | A: That means you are trying to deploy in a region where Timestream is currently not supported. 66 | 67 | Enjoy! 68 | -------------------------------------------------------------------------------- /analytics-kotlin/build.gradle.kts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | plugins { 5 | java 6 | application 7 | kotlin("jvm") version "1.6.0" 8 | id("com.github.johnrengelman.shadow") version "7.1.0" 9 | `maven-publish` 10 | } 11 | 12 | publishing { 13 | publications { 14 | create("mavenJava") { 15 | pom { 16 | licenses { 17 | license { 18 | name.set("MIT No Attribution") 19 | url.set("https://opensource.org/licenses/MIT-0") 20 | } 21 | } 22 | } 23 | } 24 | } 25 | } 26 | 27 | group "com.amazonaws.services.kinesisanalytics" 28 | version "1.0" 29 | description "Flink Amazon TimeStream Kotlin sample" 30 | 31 | repositories { 32 | mavenCentral() 33 | } 34 | 35 | val javaVersion = "11" 36 | val flinkVersion = "1.11.1" 37 | val scalaBinaryVersion = "2.12" 38 | val kdaVersion = "1.2.0" 39 | val gsonVersion = "2.8.+" 40 | val timestreamSdkVersion = "1.+" 41 | val sl4jVersion = "1.7.+" 42 | val javaMainClass = "services.kinesisanalytics.StreamingJob" 43 | 44 | dependencies { 45 | implementation(kotlin("stdlib")) 46 | implementation("org.apache.flink:flink-java:$flinkVersion") 47 | implementation("org.apache.flink:flink-streaming-java_$scalaBinaryVersion:$flinkVersion") 48 | implementation("org.apache.flink:flink-clients_$scalaBinaryVersion:$flinkVersion") 49 | implementation("org.apache.flink:flink-connector-kinesis_$scalaBinaryVersion:$flinkVersion") 50 | implementation("com.google.code.gson:gson:$gsonVersion") 51 | implementation("com.amazonaws:aws-kinesisanalytics-runtime:$kdaVersion") 52 | implementation("com.amazonaws:aws-java-sdk-timestreamwrite:$timestreamSdkVersion") 53 | implementation("org.slf4j:slf4j-simple:$sl4jVersion") 54 | } 55 | 56 | java { 57 | toolchain { 58 | languageVersion.set(JavaLanguageVersion.of(javaVersion)) 59 | } 60 | } 61 | 62 | application { 63 | mainClass.set(javaMainClass) 64 | } 65 | 66 | tasks.jar { 67 | manifest { 68 | attributes( 69 | "Main-Class" to javaMainClass 70 | ) 71 | } 72 | } 73 | 74 | tasks.shadowJar { 75 | exclude("META-INF/*.SF") 76 | exclude("META-INF/*.DSA") 77 | exclude("META-INF/*.RSA") 78 | 79 | dependencies { 80 | exclude("org.apache.flink:force-shading") 81 | exclude("com.google.code.findbugs:jsr305") 82 | exclude("org.slf4j:*") 83 | exclude("log4j:*") 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/kinesisanalytics/operators/JsonToTimestreamPayloadFn.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package com.amazonaws.services.kinesisanalytics.operators; 5 | 6 | import com.amazonaws.services.timestream.TimestreamPoint; 7 | import com.amazonaws.services.timestreamwrite.model.MeasureValueType; 8 | import com.google.common.reflect.TypeToken; 9 | import com.google.gson.Gson; 10 | import org.apache.flink.api.common.functions.RichMapFunction; 11 | import org.apache.flink.configuration.Configuration; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import java.util.Collection; 16 | import java.util.HashMap; 17 | import java.util.Map; 18 | import java.util.stream.Collectors; 19 | 20 | public class JsonToTimestreamPayloadFn extends RichMapFunction> { 21 | 22 | private static final Logger LOG = LoggerFactory.getLogger(JsonToTimestreamPayloadFn.class); 23 | 24 | @Override 25 | public void open(Configuration parameters) throws Exception { 26 | super.open(parameters); 27 | } 28 | 29 | @Override 30 | public Collection map(String jsonString) { 31 | HashMap map = new Gson().fromJson(jsonString, 32 | new TypeToken>() { 33 | }.getType()); 34 | TimestreamPoint basePoint = new TimestreamPoint(); 35 | LOG.info("will map entity {}", map); 36 | Map measures = new HashMap<>(map.size()); 37 | 38 | for (Map.Entry entry : map.entrySet()) { 39 | String key = entry.getKey(); 40 | String value = entry.getValue(); 41 | // assuming these fields are present in every JSON record 42 | if (key.toLowerCase().endsWith("_measure")) { 43 | measures.put(key, value); 44 | continue; 45 | } 46 | 47 | switch (key.toLowerCase()) { 48 | case "time": 49 | basePoint.setTime(Long.parseLong(value)); 50 | break; 51 | case "timeunit": 52 | basePoint.setTimeUnit(value); 53 | break; 54 | default: 55 | basePoint.addDimension(key, value); 56 | } 57 | } 58 | 59 | LOG.info("mapped to point {}", basePoint); 60 | return measures.entrySet().stream() 61 | .map(measure -> new TimestreamPoint( 62 | basePoint, measure.getKey(), measure.getValue(), 63 | MeasureValueType.DOUBLE)) 64 | .collect(Collectors.toList()); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /cdk/stacks/sample_kinesis_stream_producer/producer_lambda/config.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
2 | # SPDX-License-Identifier: MIT-0 3 | 4 | # Random pre-generated UUIDs to simulate actual device IDs 5 | device_ids = [ 6 | "1aeb6e58-9d5b-4fd6-a5c3-6f7dd09a150d", 7 | "f0895567-f6c9-4301-8233-3c34529ccfd7", 8 | "3fdb85b3-3403-41b9-811e-18f9bc0d6a15", 9 | "02a6a13d-0c5c-47f1-b70c-e0712e30e5d9", 10 | "eb9fb558-9905-4be5-9b96-f9045dea093d", 11 | "17002925-67d1-4575-8569-33b296cc3144", 12 | "dd2c1d50-ae38-4213-ac4f-a5f13c36d34b", 13 | "ceba9fb9-bc82-4b41-bb31-b23bf997e67d", 14 | "9123e13c-c271-4a9e-b404-4931fb334384", 15 | "04cfe7fa-3927-47b1-ba55-8d6e4cff5bfd", 16 | "2bd61428-47c6-4167-aa4d-1556eea53d55", 17 | "f3f36c4c-6d27-46fb-ba89-4978c63dbfa1", 18 | "caff4f86-781f-4dad-ad36-9949743dc3f1", 19 | "727c71c7-d756-444d-9272-bee43bb4cd84", 20 | "38ad264c-9133-495b-80d9-b3fb3911dd2b", 21 | "fb441f0d-eb70-40b8-982f-00102da45eed", 22 | "5f8b0a91-7ffc-471a-94fa-634a66b03874", 23 | "780fe42f-852f-477e-88b9-ae2e10d67450", 24 | "64ee6df6-7a88-450a-8ca3-88a7a909552e", 25 | "78bc2a48-e42e-47b5-89aa-77fb15bae460", 26 | "28df91dd-1467-450d-a2df-9fb0e88782a3", 27 | "28c6d27e-8bdf-4501-9246-96987437b369", 28 | "454a2706-ea65-4b8d-858d-9e90976884cc", 29 | "95d09a60-a842-4d04-aaa6-74f14a99faff", 30 | "64f38ad3-2033-4a1d-8a95-e197ee432356", 31 | "2277bb40-b52a-46f3-97ce-27820a41c150", 32 | "c2d6ed31-5fce-411e-baa1-632f15f56d80", 33 | "94ab5f1d-8ede-44a0-8663-c9c7b9369be2", 34 | "46c69942-1021-490d-a1e8-3c494fda161a", 35 | "974779f6-e026-4177-ab65-352b71b4b31c", 36 | "5474b5b6-a992-4f42-b347-41c20f730a97", 37 | "a8200752-8452-42c5-966d-136a350ff2d3", 38 | "5b53beaf-47be-4411-bcbe-f7656343a475", 39 | "534ac47e-d0b9-4ed9-98c5-89c567ecb1d4", 40 | "cd2685d0-b9f6-4eb4-811e-8830b020ff3a", 41 | "157e388f-c705-4ffd-a714-0372596a6b77", 42 | "1a0954b9-2090-4bc9-a482-117e2199a4c1", 43 | "876cbbed-0066-4aab-87ba-b89734a7a918", 44 | "3114bf66-9d16-4bb5-8038-63e411bb625a", 45 | "4a98bdc1-a256-496a-a21a-18531dbaf336", 46 | "1dd59859-36fe-419a-bdf9-3179a8c04cd0", 47 | "51255266-0077-4e24-a2f7-0daf56da96f3", 48 | "7be371d8-bef4-4851-a25e-7a5fe43c6b34", 49 | "77e4cc05-a6fe-4b70-b39d-d70b89780ced", 50 | "a859b47e-1517-4f07-ba69-36bc3d49d1dd", 51 | "2b577591-0441-4f57-b395-33df5ac11426", 52 | "e9228460-d7b4-4825-aba4-8581ce49c5e8", 53 | "b2cb9991-36f2-44fc-86ec-d54947dd0e0f", 54 | "d80c3939-a04b-404a-a2cb-017b7f9f4c2d", 55 | "32d3c849-4b0f-45f6-9d94-11269ddbdb59" 56 | ] 57 | 58 | measures = [ 59 | {'measure': 'temperature', 'start': 10, 'end': 15}, 60 | {'measure': 'humidity', 'start': 50, 'end': 100}, 61 | {'measure': 'voltage', 'start': 30, 'end': 70}, 62 | {'measure': 'watt', 'start': 200, 'end': 400}, 63 | ] 64 | 65 | iterations = 10 66 | 67 | chance_of_anomaly = 0.051 68 | -------------------------------------------------------------------------------- /analytics-kotlin/src/main/kotlin/services/timestream/TimestreamInitializer.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package services.timestream 5 | 6 | import com.amazonaws.ClientConfiguration 7 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite 8 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWriteClientBuilder 9 | import com.amazonaws.services.timestreamwrite.model.ConflictException 10 | import com.amazonaws.services.timestreamwrite.model.CreateDatabaseRequest 11 | import com.amazonaws.services.timestreamwrite.model.CreateTableRequest 12 | import com.amazonaws.services.timestreamwrite.model.RetentionProperties 13 | import org.slf4j.LoggerFactory 14 | 15 | /** 16 | * Checks if required database and table exists in Timestream. If they do not exists, it creates them 17 | */ 18 | class TimestreamInitializer(region: String) { 19 | companion object { 20 | private val LOG = LoggerFactory.getLogger(TimestreamInitializer::class.java) 21 | private const val HT_TTL_HOURS = 24L 22 | private const val CT_TTL_DAYS = 7L 23 | } 24 | 25 | private val writeClient: AmazonTimestreamWrite 26 | 27 | init { 28 | val clientConfiguration = ClientConfiguration() 29 | .withMaxConnections(5000) 30 | .withRequestTimeout(20 * 1000) 31 | .withMaxErrorRetry(10) 32 | writeClient = AmazonTimestreamWriteClientBuilder 33 | .standard() 34 | .withRegion(region) 35 | .withClientConfiguration(clientConfiguration) 36 | .build() 37 | } 38 | 39 | fun createDatabase(databaseName: String) { 40 | LOG.info("Creating database") 41 | val request = CreateDatabaseRequest() 42 | request.databaseName = databaseName 43 | try { 44 | writeClient.createDatabase(request) 45 | LOG.info("Database [$databaseName] created successfully") 46 | } catch (e: ConflictException) { 47 | LOG.info("Database [$databaseName] exists. Skipping database creation") 48 | } 49 | } 50 | 51 | fun createTable(databaseName: String, tableName: String) { 52 | LOG.info("Creating table") 53 | val createTableRequest = CreateTableRequest() 54 | createTableRequest.databaseName = databaseName 55 | createTableRequest.tableName = tableName 56 | val retentionProperties = RetentionProperties() 57 | .withMemoryStoreRetentionPeriodInHours(HT_TTL_HOURS) 58 | .withMagneticStoreRetentionPeriodInDays(CT_TTL_DAYS) 59 | createTableRequest.retentionProperties = retentionProperties 60 | try { 61 | writeClient.createTable(createTableRequest) 62 | LOG.info("Table [$tableName] successfully created.") 63 | } catch (e: ConflictException) { 64 | LOG.info("Table [$tableName] exists on database [$databaseName]. Skipping table creation") 65 | } 66 | } 67 | } -------------------------------------------------------------------------------- /redeploy-kda-app.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 5 | # SPDX-License-Identifier: MIT-0 6 | 7 | app=${1:-java} 8 | case $app in 9 | kotlin) app_path="./analytics-kotlin/build/libs/analytics-timestream-kotlin-sample-all.jar" 10 | ;; 11 | \?) 
app_path="./analytics/target/analytics-timestream-java-sample-1.0.jar" 12 | ;; 13 | esac 14 | app_path=${app_path:-"./analytics/target/analytics-timestream-java-sample-1.0.jar"} # fall back to the Java jar when no case branch matched 15 | 16 | wait_for_app_status () { 17 | n=0 18 | until [ "$n" -ge 20 ] 19 | do 20 | status=$( 21 | aws kinesisanalyticsv2 describe-application --application-name amazon-kinesis-analytics \ 22 | | jq -r '.ApplicationDetail.ApplicationStatus' 23 | ) 24 | if [ "$status" == "$1" ] 25 | then 26 | echo "Application is now $1" 27 | break 28 | fi 29 | echo "Waiting for $1 status for analytics app, sleeping 15 seconds" 30 | n=$((n+1)) 31 | sleep 15 32 | done 33 | } 34 | 35 | # Get Amazon Kinesis Data Analytics app description 36 | kda_app_desc=$(aws kinesisanalyticsv2 describe-application --application-name amazon-kinesis-analytics) 37 | 38 | # Determine S3 assets location 39 | kda_code_path='.ApplicationDetail.ApplicationConfigurationDescription.ApplicationCodeConfigurationDescription.CodeContentDescription.S3ApplicationCodeLocationDescription' 40 | s3_url_exp='('${kda_code_path}'.BucketARN+'\"/\"'+'${kda_code_path}'.FileKey)' 41 | 42 | s3_url=$(echo ${kda_app_desc} | jq -r ${s3_url_exp}) 43 | s3_url='s3://'${s3_url:13} 44 | aws s3 cp ${app_path} ${s3_url} 45 | echo "Copied new application assets from ${app_path} to ${s3_url}" 46 | 47 | # Stop Amazon Kinesis Data Analytics app 48 | nohup aws kinesisanalyticsv2 stop-application --application-name amazon-kinesis-analytics \ 49 | &>/dev/null & 50 | echo "Stopping analytics app" 51 | printf "\n" 52 | wait_for_app_status 'READY' 53 | echo "Stopped analytics app" 54 | 55 | # Update Amazon Kinesis Data Analytics app 56 | s3_key=$( 57 | echo ${kda_app_desc} \ 58 | | jq '.ApplicationDetail.ApplicationConfigurationDescription.ApplicationCodeConfigurationDescription 59 | .CodeContentDescription.S3ApplicationCodeLocationDescription.FileKey' 60 | ) 61 | nohup aws kinesisanalyticsv2 update-application --application-name amazon-kinesis-analytics \ 62 | --application-configuration-update "{ \"ApplicationCodeConfigurationUpdate\": { \"CodeContentUpdate\": { \"S3ContentLocationUpdate\": { \"FileKeyUpdate\": ${s3_key} } } } }" \ 63 | &>/dev/null & 64 | 65 | wait_for_app_status 'READY' 66 | echo "Updated analytics app" 67 | 68 | # Run Amazon Kinesis Data Analytics app 69 | echo "Starting analytics app" 70 | nohup aws kinesisanalyticsv2 start-application --application-name amazon-kinesis-analytics \ 71 | --run-configuration '{ "ApplicationRestoreConfiguration": { "ApplicationRestoreType": "SKIP_RESTORE_FROM_SNAPSHOT" } }' \ 72 | &>/dev/null & 73 | wait_for_app_status 'RUNNING' 74 | echo "Started analytics app" 75 | -------------------------------------------------------------------------------- /analytics-kotlin/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Apache Flink sample data connector 4 | 5 | Sample application that reads data from Amazon Kinesis Data Streams and writes to Amazon Timestream. This is similar to 6 | the one in `../analytics` but uses the `Kotlin` programming language and `Gradle` tooling. 7 | 8 | ---- 9 | 10 | ## How to test it 11 | 12 | Java 11 is the recommended version for using Amazon Kinesis Data Analytics for Apache Flink applications. If you have 13 | multiple Java versions, ensure that Java 11 is exported to your `JAVA_HOME` environment variable. 14 | 15 | 1. Optional for local testing: Create an Amazon Kinesis Data Stream with the name "TimestreamTestStream".
You can use 16 | the below AWS CLI command: 17 | ```shell 18 | $ aws kinesis create-stream --stream-name TimestreamTestStream --shard-count 1 19 | ``` 20 | 21 | 1. Optional for local testing: Compile and run the sample app locally. 22 | 23 | Upon start up the application checks if a Timestream database and tables exists and tries to create one if it can not 24 | find them. 25 | ```shell 26 | $ ./gradlew clean build 27 | $ ./gradlew run --args="--InputStreamName TimestreamTestStream --TimestreamDbName TimestreamTestDatabase --TimestreamTableName TestTable" 28 | ``` 29 | NOTE: You might need to change the version of timestreamwrite and timestreamquery dependencies in `build.gradle` file 30 | based on the version of SDK jar you are using. 31 | 32 | By default this sample app batches Timestream ingest records in batch of 75. This can be adjusted 33 | using `--TimestreamIngestBatchSize` option. 34 | ```shell 35 | $ ./gradlew clean compile 36 | $ ./gradlew run --args="--InputStreamName TimestreamTestStream --TimestreamDbName TimestreamTestDatabase --TimestreamTableName TestTable --TimestreamIngestBatchSize 75" 37 | ``` 38 | 1. Package application for deployment in Amazon Kinesis Data Analytics for Apache Flink 39 | 40 | ```shell 41 | $ ./gradlew clean shadowJar 42 | ``` 43 | This will create a `jar` package in the directory `./build/libs/`. Use the fat jar package for deployment as it 44 | contains all needed dependencies. 45 | 46 | ## For sending data into the Amazon Kinesis Data Stream 47 | 48 | You can deploy the lambda found in `../cdk/stacks/sample_kinesis_stream_producer/producer_lambda`. Or you can use the 49 | instructions on 50 | [sample script to generate a continuous stream of records that are ingested into Timestream](https://github.com/awslabs/amazon-timestream-tools/tree/master/tools/kinesis_ingestor) 51 | as guideline. 52 | 53 | ## For deploying the sample application to Kinesis Data Analytics for Apache Flink 54 | 55 | This sample application is part of the setup for transferring your time series data from Amazon Kinesis directly into 56 | Amazon Timestream. 57 | 58 | For the full set of instructions 59 | [check information here](https://docs.aws.amazon.com/timestream/latest/developerguide/ApacheFlink.html) -------------------------------------------------------------------------------- /analytics-kotlin/gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 
15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 33 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 34 | 35 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 36 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 37 | 38 | @rem Find java.exe 39 | if defined JAVA_HOME goto findJavaFromJavaHome 40 | 41 | set JAVA_EXE=java.exe 42 | %JAVA_EXE% -version >NUL 2>&1 43 | if "%ERRORLEVEL%" == "0" goto execute 44 | 45 | echo. 46 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 47 | echo. 48 | echo Please set the JAVA_HOME variable in your environment to match the 49 | echo location of your Java installation. 50 | 51 | goto fail 52 | 53 | :findJavaFromJavaHome 54 | set JAVA_HOME=%JAVA_HOME:"=% 55 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 56 | 57 | if exist "%JAVA_EXE%" goto execute 58 | 59 | echo. 60 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 61 | echo. 62 | echo Please set the JAVA_HOME variable in your environment to match the 63 | echo location of your Java installation. 64 | 65 | goto fail 66 | 67 | :execute 68 | @rem Setup the command line 69 | 70 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 71 | 72 | 73 | @rem Execute Gradle 74 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* 75 | 76 | :end 77 | @rem End local scope for the variables with windows NT shell 78 | if "%ERRORLEVEL%"=="0" goto mainEnd 79 | 80 | :fail 81 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 82 | rem the _cmd.exe /c_ return code! 83 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 84 | exit /b 1 85 | 86 | :mainEnd 87 | if "%OS%"=="Windows_NT" endlocal 88 | 89 | :omega 90 | -------------------------------------------------------------------------------- /cdk/app.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python3 2 | 3 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 
4 | # SPDX-License-Identifier: MIT-0 5 | 6 | from aws_cdk import core 7 | from pathlib import Path 8 | 9 | from stacks.amazon_timestream_stack import AmazonTimeStreamStack 10 | from stacks.grafana.grafana_stack import GrafanaStack 11 | from stacks.kinesis.amazon_kinesis_analytics_source_stack import KinesisAnalyticsSource 12 | from stacks.kinesis.amazon_kinesis_analytics_stack import KinesisAnalyticsStack 13 | from stacks.kinesis.amazon_kinesis_stream_stack import KinesisStreamStack 14 | from stacks.sample_kinesis_stream_producer.sample_kinesis_stream_producer_stack import SampleKinesisStreamProducerStack 15 | 16 | app = core.App() 17 | 18 | kda_path = app.node.try_get_context("kda_path") 19 | 20 | if kda_path is None: 21 | kda_path = "../analytics/target/analytics-timestream-java-sample-1.0.jar" 22 | print("No context defined variable kda_path for Amazon Kinesis Data Analytics for Apache Flink " 23 | "application jar file path defined. Will use default path <" + kda_path + ">.") 24 | 25 | if not Path(kda_path).is_file(): 26 | print("Warning, Apache Flink application jar file not found: <" 27 | + kda_path + ">. Make sure file exists or you've built default application, " 28 | "check analytics/README.md or analytics-kotlin/README.md for more information") 29 | 30 | timestream_stack = AmazonTimeStreamStack(app, "amazon-timestream") 31 | 32 | kinesis_stream = KinesisStreamStack(app, 'amazon-kinesis-stream') 33 | 34 | kinesis_analytics_source_stack = KinesisAnalyticsSource(app, "flink-source", 35 | stream=kinesis_stream.stream, 36 | kda_path=kda_path, 37 | database=timestream_stack.database, 38 | table=timestream_stack.table) 39 | 40 | stream_producer_stack = SampleKinesisStreamProducerStack(app, "sample-kinesis-stream-producer", 41 | stream=kinesis_stream.stream) 42 | 43 | kinesis_analytics_stack = KinesisAnalyticsStack(app, "amazon-kinesis-analytics", stream=kinesis_stream.stream, 44 | db_name=timestream_stack.database.database_name, 45 | table_name=timestream_stack.table.table_name, 46 | kda_role=kinesis_analytics_source_stack.kda_role, 47 | log_group_name=kinesis_analytics_source_stack.log_group_name, 48 | log_stream_name=kinesis_analytics_source_stack.log_stream_name, 49 | asset=kinesis_analytics_source_stack.asset) 50 | kinesis_analytics_stack.add_dependency(kinesis_analytics_source_stack) 51 | 52 | grafana_stack = GrafanaStack(app, "grafana", 53 | database=timestream_stack.database, table=timestream_stack.table) 54 | 55 | app.synth() 56 | -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/timestream/TimestreamInitializer.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package com.amazonaws.services.timestream; 5 | 6 | import com.amazonaws.ClientConfiguration; 7 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; 8 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWriteClientBuilder; 9 | import com.amazonaws.services.timestreamwrite.model.ConflictException; 10 | import com.amazonaws.services.timestreamwrite.model.CreateDatabaseRequest; 11 | import com.amazonaws.services.timestreamwrite.model.CreateTableRequest; 12 | import com.amazonaws.services.timestreamwrite.model.RetentionProperties; 13 | import org.slf4j.Logger; 14 | import org.slf4j.LoggerFactory; 15 | 16 | /** 17 | * Checks if required database and table exists in Timestream. If they do not exists, it creates them 18 | */ 19 | public class TimestreamInitializer { 20 | private static final long HT_TTL_HOURS = 24L; 21 | private static final long CT_TTL_DAYS = 7L; 22 | private final Logger logger = LoggerFactory.getLogger(getClass()); 23 | private AmazonTimestreamWrite writeClient; 24 | 25 | public TimestreamInitializer(String region) { 26 | final ClientConfiguration clientConfiguration = new ClientConfiguration() 27 | .withMaxConnections(5000) 28 | .withRequestTimeout(20 * 1000) 29 | .withMaxErrorRetry(10); 30 | 31 | this.writeClient = AmazonTimestreamWriteClientBuilder 32 | .standard() 33 | .withRegion(region) 34 | .withClientConfiguration(clientConfiguration) 35 | .build(); 36 | } 37 | 38 | public void createDatabase(String databaseName) { 39 | logger.info("Creating database"); 40 | CreateDatabaseRequest request = new CreateDatabaseRequest(); 41 | request.setDatabaseName(databaseName); 42 | try { 43 | writeClient.createDatabase(request); 44 | logger.info("Database [" + databaseName + "] created successfully"); 45 | } catch (ConflictException e) { 46 | logger.info("Database [" + databaseName + "] exists. Skipping database creation"); 47 | } 48 | } 49 | 50 | public void createTable(String databaseName, String tableName) { 51 | logger.info("Creating table"); 52 | CreateTableRequest createTableRequest = new CreateTableRequest(); 53 | createTableRequest.setDatabaseName(databaseName); 54 | createTableRequest.setTableName(tableName); 55 | final RetentionProperties retentionProperties = new RetentionProperties() 56 | .withMemoryStoreRetentionPeriodInHours(HT_TTL_HOURS) 57 | .withMagneticStoreRetentionPeriodInDays(CT_TTL_DAYS); 58 | createTableRequest.setRetentionProperties(retentionProperties); 59 | 60 | try { 61 | writeClient.createTable(createTableRequest); 62 | logger.info("Table [" + tableName + "] successfully created."); 63 | } catch (ConflictException e) { 64 | logger.info("Table [" + tableName + "] exists on database [" + databaseName + "]. Skipping table creation"); 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /analytics/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Apache Flink sample data connector 4 | 5 | Sample application that reads data from Amazon Kinesis Data Streams and writes to Amazon Timestream 6 | 7 | ---- 8 | 9 | ## How to test it 10 | 11 | Java 11 is the recommended version for using Amazon Kinesis Data Analytics for Apache Flink Application. If you have 12 | multiple Java versions ensure to export Java 11 to your `JAVA_HOME` environment variable. 13 | 14 | 1. Ensure that you have [Apache Maven](https://maven.apache.org/install.html) installed. 
You can test your Apache Maven 15 | install with the following command: 16 | ```shell 17 | $ mvn -version 18 | ``` 19 | 20 | 1. Optional for local testing: Create an Amazon Kinesis Data Stream with the name "TimestreamTestStream". You can use 21 | the below AWS CLI command: 22 | ```shell 23 | $ aws kinesis create-stream --stream-name TimestreamTestStream --shard-count 1 24 | ``` 25 | 26 | 1. Optional for local testing: Compile and run the sample app. 27 | 28 | Upon start up the application checks if a Timestream database and tables exists and tries to create one if it can not 29 | find them. 30 | ```shell 31 | $ mvn clean compile 32 | $ mvn exec:java -Dexec.mainClass="com.amazonaws.services.kinesisanalytics.StreamingJob" \ 33 | -Dexec.args="--InputStreamName TimestreamTestStream \ 34 | --TimestreamDbName TimestreamTestDatabase --TimestreamTableName TestTable" 35 | ``` 36 | NOTE: You might need to change the version of timestreamwrite and timestreamquery dependencies in `pom.xml` file 37 | based on the version of SDK jar you are using. 38 | 39 | By default this sample app batches Timestream ingest records in batch of 75. This can be adjusted 40 | using `--TimestreamIngestBatchSize` option. 41 | ```shell 42 | $ mvn clean compile 43 | $ mvn exec:java -Dexec.mainClass="com.amazonaws.services.kinesisanalytics.StreamingJob" \ 44 | -Dexec.args="--InputStreamName TimestreamTestStream --TimestreamDbName TimestreamTestDatabase \ 45 | --TimestreamTableName TestTable --TimestreamIngestBatchSize 75" 46 | ``` 47 | 1. Package application for deployment in Amazon Kinesis Data Analytics for Apache Flink 48 | 49 | ```shell 50 | $ mvn package 51 | ``` 52 | This will create a `jar` package in the directory `./target/`. Use the fat jar package for deployment as it contains 53 | all needed dependencies. 54 | 55 | ## For sending data into the Amazon Kinesis Data Stream 56 | 57 | You can deploy the lambda found in `../cdk/stacks/sample_kinesis_stream_producer/producer_lambda`. Or you can use the 58 | instructions on 59 | [sample script to generate a continuous stream of records that are ingested into Timestream](https://github.com/awslabs/amazon-timestream-tools/tree/master/tools/kinesis_ingestor) 60 | as guideline. 61 | 62 | ## For deploying the sample application to Kinesis Data Analytics for Apache Flink 63 | 64 | This sample application is part of the setup for transferring your time series data from Amazon Kinesis directly into 65 | Amazon Timestream. 66 | 67 | For the full set of instructions 68 | [check information here](https://docs.aws.amazon.com/timestream/latest/developerguide/ApacheFlink.html) -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. 
Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/timestream/TimestreamPoint.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package com.amazonaws.services.timestream; 5 | 6 | import com.amazonaws.services.timestreamwrite.model.MeasureValueType; 7 | 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | 11 | public class TimestreamPoint { 12 | private String measureName; 13 | private MeasureValueType measureValueType; 14 | private String measureValue; 15 | private long time; 16 | private String timeUnit; 17 | private Map dimensions; 18 | 19 | public TimestreamPoint() { 20 | this.dimensions = new HashMap<>(); 21 | } 22 | 23 | public TimestreamPoint(TimestreamPoint anotherPoint, 24 | String measureName, String measureValue, MeasureValueType measureValueType) { 25 | this.time = anotherPoint.time; 26 | this.timeUnit = anotherPoint.timeUnit; 27 | this.dimensions = new HashMap<>(anotherPoint.dimensions); 28 | this.measureName = measureName; 29 | this.measureValueType = measureValueType; 30 | this.measureValue = measureValue; 31 | } 32 | 33 | public TimestreamPoint(TimestreamPoint anotherPoint) { 34 | this(anotherPoint, anotherPoint.measureName, anotherPoint.measureValue, anotherPoint.measureValueType); 35 | } 36 | 37 | public String getMeasureName() { 38 | return measureName; 39 | } 40 | 41 | public void setMeasureName(String measureValue) { 42 | this.measureName = measureValue; 43 | } 44 | 45 | public String getMeasureValue() { 46 | return measureValue; 47 | } 48 | 49 | public void setMeasureValue(String measureValue) { 50 | this.measureValue = measureValue; 51 | } 52 | 53 | public MeasureValueType getMeasureValueType() { 54 | return measureValueType; 55 | } 56 | 57 | public void setMeasureValueType(MeasureValueType measureValueType) { 58 | this.measureValueType = measureValueType; 59 | } 60 | 61 | public void setMeasureValueType(String measureValueType) { 62 | this.measureValueType = MeasureValueType.fromValue(measureValueType.toUpperCase()); 63 | } 64 | 65 | public long getTime() { 66 | return time; 67 | } 68 | 69 | public void setTime(long time) { 70 | this.time = time; 71 | } 72 | 73 | public String getTimeUnit() { 74 | return timeUnit; 75 | } 76 | 77 | public void setTimeUnit(String timeUnit) { 78 | this.timeUnit = timeUnit; 79 | } 80 | 81 | public Map getDimensions() { 82 | return dimensions; 83 | } 84 | 85 | public void setDimensions(Map dims) { 86 | this.dimensions = new HashMap<>(dims); 87 | } 88 | 89 | public void addDimension(String dimensionName, String dimensionValue) { 90 | dimensions.put(dimensionName, dimensionValue); 91 | } 92 | 93 | @Override 94 | public String toString() { 95 | return "TimestreamPoint{" + 96 | "measureName='" + measureName + '\'' + 97 | ", measureValueType=" + measureValueType + 98 | ", measureValue='" + measureValue + '\'' + 99 | ", time=" + time + 100 | ", timeUnit='" + timeUnit + '\'' + 101 | ", dimensions=" + dimensions + 102 | '}'; 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /cdk/stacks/kinesis/amazon_kinesis_analytics_source_stack.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | # 4 | # Licensed under the MIT-0 License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
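#
# This stack prepares everything the Kinesis Data Analytics (KDA) for Apache
# Flink application needs before it can be created:
#   * uploads the Flink application jar found at `kda_path` to S3 as a CDK asset,
#   * creates the CloudWatch log group (5-day retention) and log stream the
#     application writes to,
#   * creates the IAM service role assumed by kinesisanalytics.amazonaws.com and
#     grants it read access to the jar asset and the source Kinesis stream,
#     permission to publish CloudWatch metrics and logs, the Timestream
#     describe/list, database- and table-scoped actions needed to create the
#     database/table and write records, and the KMS DescribeKey/CreateGrant
#     permissions Timestream requires.
# The asset, role and log group/stream names are exposed as properties and are
# consumed by the application stack in amazon_kinesis_analytics_stack.py.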
7 | 8 | from aws_cdk import ( 9 | core, 10 | aws_cloudwatch as cloudwatch, 11 | aws_iam as iam, 12 | aws_kinesis as kinesis, 13 | aws_logs as logs, 14 | aws_s3_assets as assets, 15 | aws_timestream as timestream 16 | ) 17 | 18 | from aws_cdk.aws_logs import RetentionDays 19 | from aws_cdk.core import RemovalPolicy 20 | 21 | 22 | class KinesisAnalyticsSource(core.Stack): 23 | 24 | def __init__(self, scope: core.Construct, construct_id: str, 25 | stream: kinesis.IStream, kda_path: str, 26 | database: timestream.CfnDatabase, table: timestream.CfnTable, 27 | **kwargs) -> None: 28 | super().__init__(scope, construct_id, **kwargs) 29 | 30 | asset = assets.Asset(self, "flink-source", path=kda_path) 31 | 32 | log_group = logs.LogGroup(self, "KdaLogGroup", 33 | retention=RetentionDays.FIVE_DAYS, 34 | removal_policy=RemovalPolicy.DESTROY) 35 | log_stream = log_group.add_stream("KdaLogStream") 36 | 37 | kda_role = iam.Role(self, "KdaRole", 38 | assumed_by=iam.ServicePrincipal("kinesisanalytics.amazonaws.com"), 39 | ) 40 | 41 | asset.grant_read(kda_role) 42 | stream.grant_read(kda_role) 43 | cloudwatch.Metric.grant_put_metric_data(kda_role) 44 | log_group.grant(kda_role, "logs:DescribeLogStreams") 45 | log_group.grant_write(kda_role) 46 | 47 | kda_role.add_to_policy(iam.PolicyStatement( 48 | actions=["timestream:DescribeEndpoints", 49 | "timestream:ListTables", 50 | "timestream:ListDatabases", 51 | "timestream:DescribeTable", 52 | "timestream:DescribeDatabase", 53 | ], 54 | resources=["*"] 55 | )) 56 | 57 | kda_role.add_to_policy(iam.PolicyStatement( 58 | actions=["timestream:*Database"], 59 | resources=[database.attr_arn] 60 | )) 61 | 62 | kda_role.add_to_policy(iam.PolicyStatement( 63 | actions=["timestream:*Table", "timestream:WriteRecords"], 64 | resources=[table.attr_arn] 65 | )) 66 | 67 | kda_role.add_to_policy(iam.PolicyStatement( 68 | actions=["kms:DescribeKey"], 69 | resources=["*"] 70 | )) 71 | 72 | kda_role.add_to_policy(iam.PolicyStatement( 73 | actions=["kms:CreateGrant"], 74 | resources=["*"], 75 | conditions={ 76 | "ForAnyValue:StringEquals": { 77 | "kms:EncryptionContextKeys": "aws:timestream:database-name" 78 | }, 79 | "Bool": { 80 | "kms:GrantIsForAWSResource": True 81 | }, 82 | "StringLike": { 83 | "kms:ViaService": "timestream.*.amazonaws.com" 84 | } 85 | } 86 | )) 87 | 88 | kda_role.add_to_policy(iam.PolicyStatement(actions=["kinesis:ListShards"], resources=[stream.stream_arn])) 89 | 90 | self._asset = asset 91 | self._kda_role = kda_role 92 | self._log_group_name = log_group.log_group_name 93 | self._log_stream_name = log_stream.log_stream_name 94 | 95 | @property 96 | def asset(self) -> assets.Asset: 97 | return self._asset 98 | 99 | @property 100 | def kda_role(self) -> iam.IRole: 101 | return self._kda_role 102 | 103 | @property 104 | def log_group_name(self) -> str: 105 | return self._log_group_name 106 | 107 | @property 108 | def log_stream_name(self) -> str: 109 | return self._log_stream_name 110 | -------------------------------------------------------------------------------- /cdk/stacks/kinesis/amazon_kinesis_analytics_stack.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | # 4 | # Licensed under the MIT-0 License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
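#
# This stack creates the Kinesis Data Analytics for Apache Flink application
# (runtime FLINK-1_11) from the jar asset, IAM role and log group/stream built
# by the KinesisAnalyticsSource stack:
#   * the `batchSizeParam` CloudFormation parameter (default 75) is passed to
#     the job as the `TimestreamIngestBatchSize` runtime property,
#   * `FlinkApplicationProperties` also carries the input stream name, region
#     and Timestream database/table names read by the Flink job,
#   * monitoring, parallelism and checkpointing are configured explicitly and a
#     CloudWatch logging option is attached to the application,
#   * the application name is exported as the `KdaApplicationName` output so the
#     application can be started with the AWS CLI after deployment.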
7 | 8 | from aws_cdk import ( 9 | core, 10 | aws_iam as iam, 11 | aws_kinesisanalytics as kda, 12 | aws_kinesis as kinesis, 13 | aws_s3_assets as assets 14 | ) 15 | 16 | 17 | class KinesisAnalyticsStack(core.Stack): 18 | 19 | def __init__(self, scope: core.Construct, construct_id: str, stream: kinesis.IStream, db_name: str, table_name: str, 20 | kda_role: iam.IRole, log_group_name: str, log_stream_name: str, asset: assets.Asset, **kwargs) -> None: 21 | super().__init__(scope, construct_id, **kwargs) 22 | 23 | batch_size_param = core.CfnParameter(self, "batchSizeParam", type="Number", 24 | min_value=1, max_value=100, default=75, 25 | description="Number of records ingested from stream before flushing to " 26 | "Timestream database") 27 | kda_application = kda.CfnApplicationV2( 28 | self, "KdaApplication", 29 | runtime_environment="FLINK-1_11", 30 | service_execution_role=kda_role.role_arn, 31 | application_name=core.Aws.STACK_NAME, 32 | application_configuration= 33 | kda.CfnApplicationV2.ApplicationConfigurationProperty( 34 | application_code_configuration=kda.CfnApplicationV2.ApplicationCodeConfigurationProperty( 35 | code_content=kda.CfnApplicationV2.CodeContentProperty( 36 | s3_content_location=kda.CfnApplicationV2.S3ContentLocationProperty( 37 | bucket_arn=asset.bucket.bucket_arn, 38 | file_key=asset.s3_object_key 39 | ) 40 | ), 41 | code_content_type="ZIPFILE" 42 | ), 43 | environment_properties=kda.CfnApplicationV2.EnvironmentPropertiesProperty( 44 | property_groups=[ 45 | kda.CfnApplicationV2.PropertyGroupProperty( 46 | property_group_id="FlinkApplicationProperties", 47 | property_map={ 48 | "InputStreamName": stream.stream_name, 49 | "Region": core.Aws.REGION, 50 | "TimestreamDbName": db_name, 51 | "TimestreamTableName": table_name, 52 | "TimestreamIngestBatchSize": batch_size_param.value_as_number 53 | } 54 | ) 55 | ] 56 | ), 57 | application_snapshot_configuration=kda.CfnApplicationV2.ApplicationSnapshotConfigurationProperty( 58 | snapshots_enabled=False 59 | ), 60 | flink_application_configuration=kda.CfnApplicationV2.FlinkApplicationConfigurationProperty( 61 | monitoring_configuration=kda.CfnApplicationV2.MonitoringConfigurationProperty( 62 | configuration_type="CUSTOM", 63 | log_level="INFO", 64 | metrics_level="TASK" 65 | ), 66 | parallelism_configuration=kda.CfnApplicationV2.ParallelismConfigurationProperty( 67 | configuration_type="CUSTOM", 68 | auto_scaling_enabled=False, 69 | parallelism=1, 70 | parallelism_per_kpu=1 71 | ), 72 | checkpoint_configuration=kda.CfnApplicationV2.CheckpointConfigurationProperty( 73 | configuration_type="CUSTOM", 74 | # the properties below are optional 75 | checkpointing_enabled=True, 76 | checkpoint_interval=60_000, 77 | min_pause_between_checkpoints=60_000 78 | ) 79 | ) 80 | ) 81 | ) 82 | 83 | kda_logging = kda.CfnApplicationCloudWatchLoggingOptionV2( 84 | self, "FlinkLogging", 85 | application_name=kda_application.application_name, 86 | cloud_watch_logging_option=kda.CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty( 87 | log_stream_arn="arn:{}:logs:{}:{}:log-group:{}:log-stream:{}".format( 88 | core.Aws.PARTITION, core.Aws.REGION, core.Aws.ACCOUNT_ID, 89 | log_group_name, log_stream_name))) 90 | 91 | kda_logging.add_depends_on(kda_application) 92 | 93 | core.CfnOutput(self, "KdaApplicationName", value=kda_application.application_name, 94 | export_name="KdaApplicationName") 95 | -------------------------------------------------------------------------------- 
/analytics-kotlin/src/main/kotlin/services/kinesisanalytics/StreamingJob.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package services.kinesisanalytics 5 | 6 | import org.apache.flink.api.common.serialization.SimpleStringSchema 7 | import org.apache.flink.api.java.utils.ParameterTool 8 | import org.apache.flink.streaming.api.TimeCharacteristic 9 | import org.apache.flink.streaming.api.datastream.DataStream 10 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment 11 | import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer 12 | import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants 13 | import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants 14 | import services.kinesisanalytics.operators.JsonToTimestreamPayloadFn 15 | import services.kinesisanalytics.operators.OffsetFutureTimestreamPoints 16 | import services.kinesisanalytics.utils.ParameterToolUtils 17 | import services.timestream.TimestreamInitializer 18 | import services.timestream.TimestreamSink 19 | import java.util.* 20 | 21 | 22 | /** 23 | * Skeleton for a Flink Streaming Job. 24 | * 25 | *

For a tutorial how to write a Flink streaming application, check the 26 | * tutorials and examples on the [Flink Website](http://flink.apache.org/docs/stable/). 27 | * 28 | *

To package your application into a JAR file for execution, run 29 | * './gradlew shadowJar' on the command line. 30 | * 31 | * 32 | * If you change the name of the main class (with the public static void main(String[] args)) 33 | * method, change the respective entry in the build.gradle.kts file (simply search for 'javaMainClass'). 34 | */ 35 | object StreamingJob { 36 | private const val DEFAULT_STREAM_NAME = "timeseries-input-stream" 37 | private const val DEFAULT_REGION_NAME = "eu-west-1" 38 | private const val DEFAULT_DB_NAME = "timestreamDB" 39 | private const val DEFAULT_TABLE_NAME = "timestreamTable" 40 | 41 | private fun createKinesisSource(env: StreamExecutionEnvironment, parameter: ParameterTool): DataStream { 42 | 43 | //set Kinesis consumer properties 44 | val kinesisConsumerConfig = Properties() 45 | //set the region the Kinesis stream is located in 46 | kinesisConsumerConfig[AWSConfigConstants.AWS_REGION] = parameter["Region", DEFAULT_REGION_NAME] 47 | //obtain credentials through the DefaultCredentialsProviderChain, which includes the instance metadata 48 | kinesisConsumerConfig[AWSConfigConstants.AWS_CREDENTIALS_PROVIDER] = "AUTO" 49 | val adaptiveReadSettingStr = parameter["SHARD_USE_ADAPTIVE_READS", "false"] 50 | if (adaptiveReadSettingStr == "true") { 51 | kinesisConsumerConfig[ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS] = "true" 52 | } else { 53 | //poll new events from the Kinesis stream once every second 54 | kinesisConsumerConfig[ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS] = 55 | parameter["SHARD_GETRECORDS_INTERVAL_MILLIS", "1000"] 56 | // max records to get in shot 57 | kinesisConsumerConfig[ConsumerConfigConstants.SHARD_GETRECORDS_MAX] = 58 | parameter["SHARD_GETRECORDS_MAX", "10000"] 59 | } 60 | 61 | val stream = parameter["InputStreamName", DEFAULT_STREAM_NAME] 62 | //create Kinesis source 63 | return env.addSource( 64 | FlinkKinesisConsumer( //read events from the Kinesis stream passed in as a parameter 65 | stream, //deserialize events with EventSchema 66 | SimpleStringSchema(), //using the previously defined properties 67 | kinesisConsumerConfig 68 | ) 69 | ).name("KinesisSource<${stream}>") 70 | } 71 | 72 | private fun createDatabaseAndTableIfNotExist( 73 | region: String, 74 | databaseName: String, 75 | tableName: String 76 | ) { 77 | val timestreamInitializer = TimestreamInitializer(region) 78 | timestreamInitializer.createDatabase(databaseName) 79 | timestreamInitializer.createTable(databaseName, tableName) 80 | } 81 | 82 | @JvmStatic 83 | fun main(args: Array) { 84 | val parameter: ParameterTool = ParameterToolUtils.fromArgsAndApplicationProperties(args) 85 | 86 | // set up the streaming execution environment 87 | val env = StreamExecutionEnvironment.getExecutionEnvironment() 88 | val region = parameter["Region", DEFAULT_REGION_NAME] 89 | val databaseName = parameter["TimestreamDbName", DEFAULT_DB_NAME] 90 | val tableName = parameter["TimestreamTableName", DEFAULT_TABLE_NAME] 91 | val batchSize = parameter["TimestreamIngestBatchSize", "75"].toInt() 92 | 93 | createDatabaseAndTableIfNotExist(region, databaseName, tableName) 94 | 95 | env.streamTimeCharacteristic = TimeCharacteristic.EventTime 96 | env.config.autoWatermarkInterval = 1000L 97 | 98 | createKinesisSource(env, parameter) 99 | .map(JsonToTimestreamPayloadFn()).name("MaptoTimestreamPayload") 100 | .process(OffsetFutureTimestreamPoints()).name("UpdateFutureOffsetedTimestreamPoints") 101 | .addSink(TimestreamSink(region, databaseName, tableName, batchSize)) 102 | 
.name("TimestreamSink<$databaseName, $tableName>") 103 | 104 | // execute program 105 | env.execute("Flink Streaming Java API Skeleton") 106 | } 107 | } -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/kinesisanalytics/StreamingJob.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package com.amazonaws.services.kinesisanalytics; 5 | 6 | import com.amazonaws.services.kinesisanalytics.operators.JsonToTimestreamPayloadFn; 7 | import com.amazonaws.services.kinesisanalytics.operators.OffsetFutureTimestreamPoints; 8 | import com.amazonaws.services.kinesisanalytics.utils.ParameterToolUtils; 9 | import com.amazonaws.services.timestream.TimestreamInitializer; 10 | import com.amazonaws.services.timestream.TimestreamSink; 11 | import org.apache.flink.api.common.serialization.SimpleStringSchema; 12 | import org.apache.flink.api.java.utils.ParameterTool; 13 | import org.apache.flink.streaming.api.TimeCharacteristic; 14 | import org.apache.flink.streaming.api.datastream.DataStream; 15 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 16 | import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer; 17 | import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants; 18 | import org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants; 19 | 20 | import java.util.Properties; 21 | 22 | /** 23 | * Skeleton for a Flink Streaming Job. 24 | * 25 | *

For a tutorial how to write a Flink streaming application, check the 26 | * tutorials and examples on the Flink Website. 27 | * 28 | *

To package your application into a JAR file for execution, run 29 | * 'mvn clean package' on the command line. 30 | * 31 | *

If you change the name of the main class (with the public static void main(String[] args)) 32 | * method, change the respective entry in the POM.xml file (simply search for 'mainClass'). 33 | */ 34 | public class StreamingJob { 35 | 36 | private static final String DEFAULT_STREAM_NAME = "timeseries-input-stream"; 37 | private static final String DEFAULT_REGION_NAME = "eu-west-1"; 38 | private static final String DEFAULT_DB_NAME = "timestreamDB"; 39 | private static final String DEFAULT_TABLE_NAME = "timestreamTable"; 40 | 41 | public static DataStream createKinesisSource(StreamExecutionEnvironment env, ParameterTool parameter) { 42 | 43 | //set Kinesis consumer properties 44 | Properties kinesisConsumerConfig = new Properties(); 45 | //set the region the Kinesis stream is located in 46 | kinesisConsumerConfig.setProperty(AWSConfigConstants.AWS_REGION, 47 | parameter.get("Region", DEFAULT_REGION_NAME)); 48 | //obtain credentials through the DefaultCredentialsProviderChain, which includes the instance metadata 49 | kinesisConsumerConfig.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, "AUTO"); 50 | 51 | String adaptiveReadSettingStr = parameter.get("SHARD_USE_ADAPTIVE_READS", "false"); 52 | 53 | if (adaptiveReadSettingStr.equals("true")) { 54 | kinesisConsumerConfig.setProperty(ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS, "true"); 55 | } else { 56 | //poll new events from the Kinesis stream once every second 57 | kinesisConsumerConfig.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS, 58 | parameter.get("SHARD_GETRECORDS_INTERVAL_MILLIS", "1000")); 59 | // max records to get in shot 60 | kinesisConsumerConfig.setProperty(ConsumerConfigConstants.SHARD_GETRECORDS_MAX, 61 | parameter.get("SHARD_GETRECORDS_MAX", "10000")); 62 | } 63 | 64 | //create Kinesis source 65 | 66 | return env.addSource(new FlinkKinesisConsumer<>( 67 | //read events from the Kinesis stream passed in as a parameter 68 | parameter.get("InputStreamName", DEFAULT_STREAM_NAME), 69 | //deserialize events with EventSchema 70 | new SimpleStringSchema(), 71 | //using the previously defined properties 72 | kinesisConsumerConfig 73 | )).name("KinesisSource"); 74 | } 75 | 76 | public static void main(String[] args) throws Exception { 77 | final ParameterTool parameter = ParameterToolUtils.fromArgsAndApplicationProperties(args); 78 | 79 | // set up the streaming execution environment 80 | final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); 81 | 82 | final String region = parameter.get("Region", DEFAULT_REGION_NAME); 83 | final String databaseName = parameter.get("TimestreamDbName", DEFAULT_DB_NAME); 84 | final String tableName = parameter.get("TimestreamTableName", DEFAULT_TABLE_NAME); 85 | final int batchSize = Integer.parseInt(parameter.get("TimestreamIngestBatchSize", "75")); 86 | 87 | TimestreamInitializer timestreamInitializer = new TimestreamInitializer(region); 88 | timestreamInitializer.createDatabase(databaseName); 89 | timestreamInitializer.createTable(databaseName, tableName); 90 | 91 | env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); 92 | env.getConfig().setAutoWatermarkInterval(1000L); 93 | 94 | createKinesisSource(env, parameter) 95 | .map(new JsonToTimestreamPayloadFn()).name("MaptoTimestreamPayload") 96 | .process(new OffsetFutureTimestreamPoints()).name("UpdateFutureOffsetedTimestreamPoints") 97 | .addSink(new TimestreamSink(region, databaseName, tableName, batchSize)) 98 | .name("TimeSeries<" + databaseName + ", " + tableName + 
">"); 99 | 100 | // execute program 101 | env.execute("Flink Streaming Java API Skeleton"); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /analytics-kotlin/src/main/kotlin/services/timestream/TimestreamSink.kt: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package services.timestream 5 | 6 | import com.amazonaws.ClientConfiguration 7 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite 8 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWriteClientBuilder 9 | import com.amazonaws.services.timestreamwrite.model.Dimension 10 | import com.amazonaws.services.timestreamwrite.model.Record 11 | import com.amazonaws.services.timestreamwrite.model.RejectedRecordsException 12 | import com.amazonaws.services.timestreamwrite.model.WriteRecordsRequest 13 | import org.apache.flink.api.common.state.ListState 14 | import org.apache.flink.api.common.state.ListStateDescriptor 15 | import org.apache.flink.configuration.Configuration 16 | import org.apache.flink.runtime.state.FunctionInitializationContext 17 | import org.apache.flink.runtime.state.FunctionSnapshotContext 18 | import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction 19 | import org.apache.flink.streaming.api.functions.sink.RichSinkFunction 20 | import org.slf4j.LoggerFactory 21 | import java.util.concurrent.LinkedBlockingQueue 22 | 23 | /** 24 | * Sink function for Flink to ingest data to Timestream 25 | */ 26 | class TimestreamSink( 27 | private val region: String, private val db: String, private val table: String, 28 | private val batchSize: Int 29 | ) : 30 | RichSinkFunction>(), CheckpointedFunction { 31 | companion object { 32 | private const val RECORDS_FLUSH_INTERVAL_MILLISECONDS = 60L * 1000L // One minute 33 | private val LOG = LoggerFactory.getLogger(TimestreamSink::class.java) 34 | } 35 | 36 | private val bufferedRecords = LinkedBlockingQueue() 37 | 38 | @Transient 39 | private lateinit var checkPointedState: ListState 40 | 41 | @Transient 42 | private lateinit var writeClient: AmazonTimestreamWrite 43 | 44 | private var emptyListTimestamp: Long = System.currentTimeMillis() 45 | 46 | override fun open(parameters: Configuration) { 47 | super.open(parameters) 48 | val clientConfiguration = ClientConfiguration() 49 | .withMaxConnections(5000) 50 | .withRequestTimeout(20 * 1000) 51 | .withMaxErrorRetry(10) 52 | writeClient = AmazonTimestreamWriteClientBuilder 53 | .standard() 54 | .withRegion(region) 55 | .withClientConfiguration(clientConfiguration) 56 | .build() 57 | } 58 | 59 | override fun invoke(value: Collection) { 60 | 61 | bufferedRecords.addAll(createRecords(value)) 62 | 63 | if (shouldPublish()) { 64 | while (!bufferedRecords.isEmpty()) { 65 | val recordsToSend: MutableList = ArrayList(batchSize) 66 | bufferedRecords.drainTo(recordsToSend, batchSize) 67 | writeBatch(recordsToSend) 68 | } 69 | } 70 | } 71 | 72 | private fun writeBatch(recordsToSend: MutableList) { 73 | val writeRecordsRequest = WriteRecordsRequest() 74 | .withDatabaseName(db) 75 | .withTableName(table) 76 | .withRecords(recordsToSend) 77 | try { 78 | val writeRecordsResult = writeClient.writeRecords(writeRecordsRequest) 79 | LOG.debug("writeRecords Status: ${writeRecordsResult.sdkHttpMetadata.httpStatusCode}") 80 | emptyListTimestamp = System.currentTimeMillis() 81 | } catch (e: RejectedRecordsException) { 82 | 
val rejectedRecords = e.rejectedRecords 83 | LOG.warn("Rejected Records -> ${rejectedRecords.size}") 84 | rejectedRecords.forEach { 85 | LOG.warn("Discarding Malformed Record -> $it") 86 | LOG.warn("Rejected Record Reason -> ${it.reason}") 87 | } 88 | } catch (e: Exception) { 89 | LOG.error("Error: $e", e) 90 | } 91 | } 92 | 93 | private fun createRecords(points: Collection): Collection { 94 | return points.asSequence() 95 | .map { 96 | Record() 97 | .withDimensions( 98 | it.getDimensions().entries 99 | .map { entry -> 100 | Dimension().withName(entry.key).withValue(entry.value) 101 | } 102 | ) 103 | .withMeasureName(it.measureName) 104 | .withMeasureValueType(it.measureValueType) 105 | .withMeasureValue(it.measureValue) 106 | .withTimeUnit(it.timeUnit) 107 | .withTime(it.time.toString()) 108 | }.toList() 109 | } 110 | 111 | // Method to validate if record batch should be published. 112 | // This method would return true if the accumulated records has reached the batch size. 113 | // Or if records have been accumulated for last RECORDS_FLUSH_INTERVAL_MILLISECONDS time interval. 114 | private fun shouldPublish(): Boolean { 115 | if (bufferedRecords.size >= batchSize) { 116 | LOG.debug("Batch of size ${bufferedRecords.size} should get published") 117 | return true 118 | } else if (System.currentTimeMillis() - emptyListTimestamp >= RECORDS_FLUSH_INTERVAL_MILLISECONDS) { 119 | LOG.debug("Records after flush interval should get published") 120 | return true 121 | } 122 | return false 123 | } 124 | 125 | override fun snapshotState(functionSnapshotContext: FunctionSnapshotContext) { 126 | checkPointedState.clear() 127 | bufferedRecords.forEach(checkPointedState::add) 128 | } 129 | 130 | override fun initializeState(functionInitializationContext: FunctionInitializationContext) { 131 | val descriptor = ListStateDescriptor("recordList", Record::class.java) 132 | checkPointedState = functionInitializationContext.operatorStateStore.getListState(descriptor) 133 | if (functionInitializationContext.isRestored) { 134 | bufferedRecords.addAll(checkPointedState.get()) 135 | } 136 | } 137 | } -------------------------------------------------------------------------------- /analytics-kotlin/gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # 4 | # Copyright 2015 the original author or authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | ## 21 | ## Gradle start up script for UN*X 22 | ## 23 | ############################################################################## 24 | 25 | # Attempt to set APP_HOME 26 | # Resolve links: $0 may be a link 27 | PRG="$0" 28 | # Need this for relative symlinks. 
29 | while [ -h "$PRG" ] ; do 30 | ls=`ls -ld "$PRG"` 31 | link=`expr "$ls" : '.*-> \(.*\)$'` 32 | if expr "$link" : '/.*' > /dev/null; then 33 | PRG="$link" 34 | else 35 | PRG=`dirname "$PRG"`"/$link" 36 | fi 37 | done 38 | SAVED="`pwd`" 39 | cd "`dirname \"$PRG\"`/" >/dev/null 40 | APP_HOME="`pwd -P`" 41 | cd "$SAVED" >/dev/null 42 | 43 | APP_NAME="Gradle" 44 | APP_BASE_NAME=`basename "$0"` 45 | 46 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 47 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 48 | 49 | # Use the maximum available, or set MAX_FD != -1 to use that value. 50 | MAX_FD="maximum" 51 | 52 | warn () { 53 | echo "$*" 54 | } 55 | 56 | die () { 57 | echo 58 | echo "$*" 59 | echo 60 | exit 1 61 | } 62 | 63 | # OS specific support (must be 'true' or 'false'). 64 | cygwin=false 65 | msys=false 66 | darwin=false 67 | nonstop=false 68 | case "`uname`" in 69 | CYGWIN* ) 70 | cygwin=true 71 | ;; 72 | Darwin* ) 73 | darwin=true 74 | ;; 75 | MINGW* ) 76 | msys=true 77 | ;; 78 | NONSTOP* ) 79 | nonstop=true 80 | ;; 81 | esac 82 | 83 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 84 | 85 | 86 | # Determine the Java command to use to start the JVM. 87 | if [ -n "$JAVA_HOME" ] ; then 88 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 89 | # IBM's JDK on AIX uses strange locations for the executables 90 | JAVACMD="$JAVA_HOME/jre/sh/java" 91 | else 92 | JAVACMD="$JAVA_HOME/bin/java" 93 | fi 94 | if [ ! -x "$JAVACMD" ] ; then 95 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 96 | 97 | Please set the JAVA_HOME variable in your environment to match the 98 | location of your Java installation." 99 | fi 100 | else 101 | JAVACMD="java" 102 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 103 | 104 | Please set the JAVA_HOME variable in your environment to match the 105 | location of your Java installation." 106 | fi 107 | 108 | # Increase the maximum file descriptors if we can. 109 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 110 | MAX_FD_LIMIT=`ulimit -H -n` 111 | if [ $? -eq 0 ] ; then 112 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 113 | MAX_FD="$MAX_FD_LIMIT" 114 | fi 115 | ulimit -n $MAX_FD 116 | if [ $? 
-ne 0 ] ; then 117 | warn "Could not set maximum file descriptor limit: $MAX_FD" 118 | fi 119 | else 120 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 121 | fi 122 | fi 123 | 124 | # For Darwin, add options to specify how the application appears in the dock 125 | if $darwin; then 126 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 127 | fi 128 | 129 | # For Cygwin or MSYS, switch paths to Windows format before running java 130 | if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then 131 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 132 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 133 | 134 | JAVACMD=`cygpath --unix "$JAVACMD"` 135 | 136 | # We build the pattern for arguments to be converted via cygpath 137 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 138 | SEP="" 139 | for dir in $ROOTDIRSRAW ; do 140 | ROOTDIRS="$ROOTDIRS$SEP$dir" 141 | SEP="|" 142 | done 143 | OURCYGPATTERN="(^($ROOTDIRS))" 144 | # Add a user-defined pattern to the cygpath arguments 145 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 146 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 147 | fi 148 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 149 | i=0 150 | for arg in "$@" ; do 151 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 152 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 153 | 154 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 155 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 156 | else 157 | eval `echo args$i`="\"$arg\"" 158 | fi 159 | i=`expr $i + 1` 160 | done 161 | case $i in 162 | 0) set -- ;; 163 | 1) set -- "$args0" ;; 164 | 2) set -- "$args0" "$args1" ;; 165 | 3) set -- "$args0" "$args1" "$args2" ;; 166 | 4) set -- "$args0" "$args1" "$args2" "$args3" ;; 167 | 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 168 | 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 169 | 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; 170 | 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 171 | 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 172 | esac 173 | fi 174 | 175 | # Escape application args 176 | save () { 177 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 178 | echo " " 179 | } 180 | APP_ARGS=`save "$@"` 181 | 182 | # Collect all arguments for the java command, following the shell quoting and substitution rules 183 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 184 | 185 | exec "$JAVACMD" "$@" 186 | -------------------------------------------------------------------------------- /cdk/stacks/grafana/grafana_stack.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | # 4 | # Licensed under the MIT-0 License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
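#
# This stack runs Grafana on Amazon ECS with AWS Fargate behind an Application
# Load Balancer:
#   * a two-AZ VPC with interface endpoints for EFS and Secrets Manager,
#   * an encrypted EFS file system mounted at /var/lib/grafana through an access
#     point, so Grafana's data survives task replacement,
#   * the public grafana/grafana image, with GF_INSTALL_PLUGINS set so the
#     Amazon Timestream datasource plugin is installed on start-up,
#   * an admin password generated in AWS Secrets Manager and injected as
#     GF_SECURITY_ADMIN_PASSWORD (its secret name is exported as the
#     GrafanaAdminSecret output),
#   * a task role limited to read-only Timestream access on the given database
#     and table plus a set of read-only CloudWatch and EC2 describe actions.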
7 | 8 | from aws_cdk import ( 9 | core, 10 | aws_ec2 as ec2, 11 | aws_ecs as ecs, 12 | aws_ecs_patterns as ecs_patterns, 13 | aws_efs as efs, 14 | aws_iam as iam, 15 | aws_logs as logs, 16 | aws_secretsmanager as secretsmanager, 17 | aws_timestream as timestream, 18 | ) 19 | from aws_cdk.aws_ecs import PortMapping, MountPoint, EfsVolumeConfiguration, AuthorizationConfig 20 | from aws_cdk.aws_efs import PosixUser, Acl 21 | 22 | 23 | class GrafanaStack(core.Stack): 24 | 25 | def __init__(self, scope: core.Construct, construct_id: str, 26 | database: timestream.CfnDatabase, table: timestream.CfnTable, 27 | **kwargs) -> None: 28 | super().__init__(scope, construct_id, **kwargs) 29 | 30 | vpc = ec2.Vpc(self, "GrafanaVpc", max_azs=2) 31 | 32 | vpc.add_interface_endpoint('EFSEndpoint', service=ec2.InterfaceVpcEndpointAwsService.ELASTIC_FILESYSTEM) 33 | vpc.add_interface_endpoint('SMEndpoint', service=ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER) 34 | 35 | cluster = ecs.Cluster(self, "MyCluster", vpc=vpc) 36 | 37 | file_system = efs.FileSystem( 38 | self, "EfsFileSystem", 39 | vpc=vpc, 40 | encrypted=True, 41 | lifecycle_policy=efs.LifecyclePolicy.AFTER_14_DAYS, 42 | performance_mode=efs.PerformanceMode.GENERAL_PURPOSE, 43 | throughput_mode=efs.ThroughputMode.BURSTING 44 | ) 45 | 46 | access_point = efs.AccessPoint( 47 | self, "EfsAccessPoint", 48 | file_system=file_system, 49 | path="/var/lib/grafana", 50 | posix_user=PosixUser( 51 | gid="1000", 52 | uid="1000" 53 | ), 54 | create_acl=Acl( 55 | owner_gid="1000", 56 | owner_uid="1000", 57 | permissions="755" 58 | ) 59 | ) 60 | 61 | log_group = logs.LogGroup(self, "taskLogGroup", 62 | retention=logs.RetentionDays.ONE_MONTH 63 | ) 64 | 65 | container_log_driver = ecs.LogDrivers.aws_logs(stream_prefix="fargate-grafana", log_group=log_group) 66 | 67 | task_role = iam.Role(self, "taskRole", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com")) 68 | 69 | task_role.add_to_policy(iam.PolicyStatement( 70 | effect=iam.Effect.ALLOW, 71 | actions=[ 72 | "cloudwatch:DescribeAlarmsForMetric", 73 | "cloudwatch:DescribeAlarmHistory", 74 | "cloudwatch:DescribeAlarms", 75 | "cloudwatch:ListMetrics", 76 | "cloudwatch:GetMetricStatistics", 77 | "cloudwatch:GetMetricData", 78 | "ec2:DescribeTags", 79 | "ec2:DescribeInstances", 80 | "ec2:DescribeRegions", 81 | "tag:GetResources" 82 | ], 83 | resources=["*"] 84 | )) 85 | self.grant_timestream_read(task_role, database, table) 86 | 87 | execution_role = iam.Role(self, "executionRole", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com")) 88 | log_group.grant_write(execution_role) 89 | 90 | volume_name = "efsGrafanaVolume" 91 | 92 | volume_config = ecs.Volume( 93 | name=volume_name, 94 | efs_volume_configuration=EfsVolumeConfiguration( 95 | file_system_id=file_system.file_system_id, 96 | transit_encryption="ENABLED", 97 | authorization_config=AuthorizationConfig(access_point_id=access_point.access_point_id) 98 | )) 99 | 100 | task_definition = ecs.FargateTaskDefinition( 101 | self, "TaskDef", 102 | task_role=task_role, 103 | execution_role=execution_role, 104 | volumes=[volume_config] 105 | ) 106 | 107 | grafana_admin_password = secretsmanager.Secret(self, "grafanaAdminPassword") 108 | grafana_admin_password.grant_read(task_role) 109 | 110 | container_web = task_definition.add_container( 111 | "grafana", 112 | image=ecs.ContainerImage.from_registry("grafana/grafana"), 113 | logging=container_log_driver, 114 | environment={ 115 | "GF_INSTALL_PLUGINS": "grafana-timestream-datasource", 116 | 
"GF_AWS_default_REGION": core.Aws.REGION 117 | }, 118 | secrets={ 119 | "GF_SECURITY_ADMIN_PASSWORD": ecs.Secret.from_secrets_manager( 120 | grafana_admin_password) 121 | }) 122 | 123 | container_web.add_port_mappings(PortMapping(container_port=3000)) 124 | container_web.add_mount_points( 125 | MountPoint(container_path="/var/lib/grafana", read_only=False, source_volume=volume_config.name) 126 | ) 127 | 128 | fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService( 129 | self, "MyFargateService", 130 | cluster=cluster, 131 | cpu=1024, 132 | desired_count=1, 133 | task_definition=task_definition, 134 | memory_limit_mib=2048, 135 | platform_version=ecs.FargatePlatformVersion.LATEST 136 | ) 137 | 138 | fargate_service.target_group.configure_health_check(path="/api/health") 139 | file_system.connections.allow_default_port_from(fargate_service.service.connections) 140 | 141 | core.CfnOutput(self, "GrafanaAdminSecret", value=grafana_admin_password.secret_name, 142 | export_name="GrafanaAdminSecret") 143 | 144 | def grant_timestream_read(self, execution_role, database, table): 145 | execution_role.add_to_policy(iam.PolicyStatement( 146 | effect=iam.Effect.ALLOW, 147 | actions=[ 148 | "timestream:DescribeEndpoints", 149 | "timestream:ListDatabases", 150 | "timestream:SelectValues" 151 | ], 152 | resources=["*"] 153 | )) 154 | execution_role.add_to_policy(iam.PolicyStatement( 155 | effect=iam.Effect.ALLOW, 156 | actions=[ 157 | "timestream:ListTables", 158 | "timestream:DescribeDatabase" 159 | ], 160 | resources=[database.attr_arn] 161 | )) 162 | execution_role.add_to_policy(iam.PolicyStatement( 163 | effect=iam.Effect.ALLOW, 164 | actions=[ 165 | "timestream:Select", 166 | "timestream:ListMeasures", 167 | "timestream:DescribeTable" 168 | ], 169 | resources=[table.attr_arn] 170 | )) 171 | -------------------------------------------------------------------------------- /analytics/src/main/java/com/amazonaws/services/timestream/TimestreamSink.java: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | 4 | package com.amazonaws.services.timestream; 5 | 6 | import com.amazonaws.ClientConfiguration; 7 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; 8 | import com.amazonaws.services.timestreamwrite.AmazonTimestreamWriteClientBuilder; 9 | import com.amazonaws.services.timestreamwrite.model.*; 10 | import org.apache.flink.api.common.state.ListState; 11 | import org.apache.flink.api.common.state.ListStateDescriptor; 12 | import org.apache.flink.configuration.Configuration; 13 | import org.apache.flink.runtime.state.FunctionInitializationContext; 14 | import org.apache.flink.runtime.state.FunctionSnapshotContext; 15 | import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; 16 | import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; 17 | import org.slf4j.Logger; 18 | import org.slf4j.LoggerFactory; 19 | 20 | import java.util.ArrayList; 21 | import java.util.Collection; 22 | import java.util.List; 23 | import java.util.concurrent.BlockingQueue; 24 | import java.util.concurrent.LinkedBlockingQueue; 25 | import java.util.stream.Collectors; 26 | 27 | /** 28 | * Sink function for Flink to ingest data to Timestream 29 | */ 30 | public class TimestreamSink extends RichSinkFunction> implements CheckpointedFunction { 31 | private static final long RECORDS_FLUSH_INTERVAL_MILLISECONDS = 60L * 1000L; // One minute 32 | private final Logger logger = LoggerFactory.getLogger(getClass()); 33 | private final String region; 34 | private final String db; 35 | private final String table; 36 | private final Integer batchSize; 37 | private final BlockingQueue bufferedRecords; 38 | private transient ListState checkPointedState; 39 | private transient AmazonTimestreamWrite writeClient; 40 | private long emptyListTimestamp; 41 | 42 | public TimestreamSink(String region, String databaseName, String tableName, int batchSize) { 43 | this.region = region; 44 | this.db = databaseName; 45 | this.table = tableName; 46 | this.batchSize = batchSize; 47 | this.bufferedRecords = new LinkedBlockingQueue<>(); 48 | this.emptyListTimestamp = System.currentTimeMillis(); 49 | } 50 | 51 | @Override 52 | public void open(Configuration parameters) throws Exception { 53 | super.open(parameters); 54 | 55 | final ClientConfiguration clientConfiguration = new ClientConfiguration() 56 | .withMaxConnections(5000) 57 | .withRequestTimeout(20 * 1000) 58 | .withMaxErrorRetry(10); 59 | 60 | this.writeClient = AmazonTimestreamWriteClientBuilder 61 | .standard() 62 | .withRegion(this.region) 63 | .withClientConfiguration(clientConfiguration) 64 | .build(); 65 | } 66 | 67 | @Override 68 | public void invoke(Collection points, Context context) { 69 | bufferedRecords.addAll(createRecords(points)); 70 | 71 | if (shouldPublish()) { 72 | while (!bufferedRecords.isEmpty()) { 73 | List recordsToSend = new ArrayList<>(batchSize); 74 | bufferedRecords.drainTo(recordsToSend, batchSize); 75 | 76 | writeBatch(recordsToSend); 77 | } 78 | } 79 | } 80 | 81 | private void writeBatch(List recordsToSend) { 82 | WriteRecordsRequest writeRecordsRequest = new WriteRecordsRequest() 83 | .withDatabaseName(this.db) 84 | .withTableName(this.table) 85 | .withRecords(recordsToSend); 86 | 87 | try { 88 | WriteRecordsResult writeRecordsResult = this.writeClient.writeRecords(writeRecordsRequest); 89 | logger.debug("writeRecords Status: " + writeRecordsResult.getSdkHttpMetadata().getHttpStatusCode()); 90 | emptyListTimestamp = System.currentTimeMillis(); 91 | 92 | } catch 
(RejectedRecordsException e) { 93 | List rejectedRecords = e.getRejectedRecords(); 94 | logger.warn("Rejected Records -> " + rejectedRecords.size()); 95 | for (int i = rejectedRecords.size() - 1; i >= 0; i--) { 96 | logger.warn("Discarding Malformed Record -> {}", rejectedRecords.get(i).toString()); 97 | logger.warn("Rejected Record Reason -> {}", rejectedRecords.get(i).getReason()); 98 | } 99 | } catch (Exception e) { 100 | logger.error("Error: " + e); 101 | } 102 | } 103 | 104 | private Collection createRecords(Collection points) { 105 | return points.stream() 106 | .map(point -> new Record() 107 | .withDimensions(point.getDimensions().entrySet().stream() 108 | .map(entry -> new Dimension() 109 | .withName(entry.getKey()) 110 | .withValue(entry.getValue())) 111 | .collect(Collectors.toList())) 112 | .withMeasureName(point.getMeasureName()) 113 | .withMeasureValueType(point.getMeasureValueType()) 114 | .withMeasureValue(point.getMeasureValue()) 115 | .withTimeUnit(point.getTimeUnit()) 116 | .withTime(String.valueOf(point.getTime()))) 117 | .collect(Collectors.toList()); 118 | } 119 | 120 | // Method to validate if record batch should be published. 121 | // This method would return true if the accumulated records has reached the batch size. 122 | // Or if records have been accumulated for last RECORDS_FLUSH_INTERVAL_MILLISECONDS time interval. 123 | private boolean shouldPublish() { 124 | if (bufferedRecords.size() >= batchSize) { 125 | logger.debug("Batch of size " + bufferedRecords.size() + " should get published"); 126 | return true; 127 | } else if (System.currentTimeMillis() - emptyListTimestamp >= RECORDS_FLUSH_INTERVAL_MILLISECONDS) { 128 | logger.debug("Records after flush interval should get published"); 129 | return true; 130 | } 131 | return false; 132 | } 133 | 134 | @Override 135 | public void close() throws Exception { 136 | super.close(); 137 | } 138 | 139 | @Override 140 | public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception { 141 | checkPointedState.clear(); 142 | for (Record bufferedRecord : bufferedRecords) { 143 | checkPointedState.add(bufferedRecord); 144 | } 145 | } 146 | 147 | @Override 148 | public void initializeState(FunctionInitializationContext functionInitializationContext) throws Exception { 149 | ListStateDescriptor descriptor = new ListStateDescriptor<>("recordList", Record.class); 150 | 151 | checkPointedState = functionInitializationContext.getOperatorStateStore().getListState(descriptor); 152 | 153 | if (functionInitializationContext.isRestored()) { 154 | for (Record element : checkPointedState.get()) { 155 | bufferedRecords.add(element); 156 | } 157 | } 158 | } 159 | } -------------------------------------------------------------------------------- /analytics/pom.xml: -------------------------------------------------------------------------------- 1 | 5 | 7 | 8 | 9 | MIT No Attribution 10 | https://opensource.org/licenses/MIT-0 11 | 12 | 13 | 14 | 4.0.0 15 | 16 | com.amazonaws.services.kinesisanalytics 17 | analytics-timestream-java-sample 18 | 1.0 19 | jar 20 | 21 | Flink Amazon TimeStream Java sample 22 | 23 | 24 | UTF-8 25 | 1.11.1 26 | 1.2.0 27 | 11 28 | 2.12 29 | ${java.version} 30 | ${java.version} 31 | 32 | 33 | 34 | 35 | 36 | com.amazonaws 37 | aws-java-sdk-bom 38 | 1.12.128 39 | pom 40 | import 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | org.apache.flink 51 | flink-java 52 | ${flink.version} 53 | 54 | 55 | org.apache.flink 56 | flink-streaming-java_${scala.binary.version} 57 | ${flink.version} 58 | 59 
| 60 | org.apache.flink 61 | flink-clients_${scala.binary.version} 62 | ${flink.version} 63 | 64 | 65 | 66 | com.google.code.gson 67 | gson 68 | 2.8.9 69 | 70 | 71 | 72 | 73 | com.amazonaws 74 | aws-kinesisanalytics-runtime 75 | ${kda.version} 76 | 77 | 78 | 79 | org.apache.flink 80 | flink-connector-kinesis_${scala.binary.version} 81 | ${flink.version} 82 | 83 | 84 | 85 | com.amazonaws 86 | aws-java-sdk-timestreamwrite 87 | 88 | 89 | 90 | org.slf4j 91 | slf4j-simple 92 | 1.7.32 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | org.apache.maven.plugins 102 | maven-compiler-plugin 103 | 3.1 104 | 105 | ${java.version} 106 | ${java.version} 107 | 108 | 109 | 110 | 111 | 112 | 113 | org.apache.maven.plugins 114 | maven-shade-plugin 115 | 3.0.0 116 | 117 | 118 | 119 | package 120 | 121 | shade 122 | 123 | 124 | 125 | 126 | org.apache.flink:force-shading 127 | com.google.code.findbugs:jsr305 128 | org.slf4j:* 129 | log4j:* 130 | 131 | 132 | 133 | 134 | 136 | *:* 137 | 138 | META-INF/*.SF 139 | META-INF/*.DSA 140 | META-INF/*.RSA 141 | 142 | 143 | 144 | 145 | 147 | com.amazonaws.services.kinesisanalytics.StreamingJob 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | org.eclipse.m2e 162 | lifecycle-mapping 163 | 1.0.0 164 | 165 | 166 | 167 | 168 | 169 | org.apache.maven.plugins 170 | maven-shade-plugin 171 | [3.0.0,) 172 | 173 | shade 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | org.apache.maven.plugins 183 | maven-compiler-plugin 184 | [3.1,) 185 | 186 | testCompile 187 | compile 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | add-dependencies-for-IDEA 208 | 209 | 210 | idea.version 211 | 212 | 213 | 214 | 215 | org.apache.flink 216 | flink-java 217 | ${flink.version} 218 | compile 219 | 220 | 221 | org.apache.flink 222 | flink-streaming-java_${scala.binary.version} 223 | ${flink.version} 224 | compile 225 | 226 | 227 | 228 | 229 | 230 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Amazon Timestream with Amazon Kinesis and Grafana Demo 2 | 3 | Sample application that reads events from Amazon Kinesis Data Streams and batches records to Amazon Timestream, 4 | visualizing results via Grafana. 5 | 6 | ### Overall architecture 7 | 8 | The overall serverless architecture will work on events streamed to [Amazon Kinesis](https://aws.amazon.com/kinesis/) 9 | Data Streams, pulled by an [Apache Flink](https://flink.apache.org/) 10 | analytics application hosted on [Amazon Kinesis Data Analytics](https://aws.amazon.com/de/kinesis/data-analytics/) 11 | to be batch-inserted in [Amazon Timestream](https://aws.amazon.com/timestream/) database. Finally, data will be 12 | visualized directly from the Timestream database by a Grafana dashboard using the 13 | [Timestream datasource plugin](https://grafana.com/grafana/plugins/grafana-timestream-datasource). 14 | 15 | ![Data sent from IoT device, to Amazon Kinesis Data Stream to Amazon Kinesis Data Analytics for Apache Flink to Amazon Timestream then visualized through Grafana ](assets/OverallArchitecture.svg) 16 | 17 | The sample setup will assume that events are being streamed via Amazon Kinesis service. 
However, this is not a 18 | precondition as any other streaming service like Kafka provisioned on EC2 or 19 | using [Amazon Managed Streaming for Apache Kafka(Amazon MSK)](https://aws.amazon.com/msk/) can be used in a similar 20 | setup. To simulate devices streaming data to the Kinesis data stream, an AWS Lambda function produces events and pushes 21 | them to Kinesis data stream, the events streamed will contain information similar to the below sample. 22 | 23 | |DeviceID |Timestamp |temperature |humidity |voltage | watt | 24 | |---------------------------------------|:--------------------------:|:----------:|:-------:|:------:|-----:| 25 | |b974f43a-2f04-11eb-adc1-0242ac120002 |2020-09-13 11:49:42.5352820 |15.5 |70.3 |39.7 |301.44| 26 | 27 | In Timestream data will be modeled as follows 28 | 29 | |DeviceID (Dimension) | measure_value::double | measure_name | measure_type |time | 30 | |----------------------------------------|:------------------------:|:------------:|:-------------|:-------------------------:| 31 | |b974f43a-2f04-11eb-adc1-0242ac120002 |15.5 |temperature |DOUBLE |2020-09-13 11:49:42.5352820| 32 | |b974f43a-2f04-11eb-adc1-0242ac120002 |70.3 |humidity |DOUBLE |2020-09-13 11:49:42.5352820| 33 | |b974f43a-2f04-11eb-adc1-0242ac120002 |39.7 |voltage |DOUBLE |2020-09-13 11:49:42.5352820| 34 | |b974f43a-2f04-11eb-adc1-0242ac120002 |301.4 |watt |DOUBLE |2020-09-13 11:49:42.5352820| 35 | 36 | The Device ID is mapped as a 37 | [Dimension](https://docs.aws.amazon.com/timestream/latest/developerguide/API_Dimension.html) 38 | and the Property fields measured mapped as 39 | [`measure_name`](https://docs.aws.amazon.com/timestream/latest/developerguide/concepts.html) 40 | , finally the value of the measure is mapped to the 41 | [`measure_value`](https://docs.aws.amazon.com/timestream/latest/developerguide/concepts.html) 42 | .`datatype` is set as double in this case. 43 | [Check the best practices on mapping](https://docs.aws.amazon.com/timestream/latest/developerguide/best-practices.html#data-modeling) 44 | and data modeling for a better insight on mapping your data. 45 | 46 | ## Getting started 47 | 48 | ### 1. Building and packaging Amazon Kinesis Data Analytics for Apache Flink application 49 | 50 | This project provides 2 sample applications built with different toolsets. You can use either one of those as the 51 | application to be deployed. 52 | 53 | 1. To build an application using Java and Apache Maven, refer to [instructions here](./analytics/README.md) 54 | 2. To build an application using Kotlin and Gradle, refer to [instructions here](./analytics-kotlin/README.md) 55 | 56 | ### 2. Deploy infrastructure 57 | 58 | Infrastructure deployment will automatically use packaged application jar and upload it to an 59 | [Amazon S3](https://aws.amazon.com/s3/) bucket. The infrastructure utilizes multiple stacks built using an AWS CDK 60 | project with Python3 language. For more information on working with the CDK and Python, 61 | [check the following guide](https://docs.aws.amazon.com/cdk/latest/guide/work-with-cdk-python.html). To deploy all 62 | stacks use the `--all` option when invoking `cdk deploy` 63 | 64 | 1. Navigate to cdk folder 65 | 1. Follow [instructions here](cdk/README.md) to create virtual environment and build stacks 66 | 1. Make sure CDK environment is bootstrapped in the account and region you're deploying stacks to, as the stacks utilize 67 | assets to deploy the Kinesis Data Analytics Flink application 68 | ```shell 69 | $ cdk bootstrap 70 | ``` 71 | 1. 
## Getting started

### 1. Building and packaging the Amazon Kinesis Data Analytics for Apache Flink application

This project provides two sample applications built with different toolsets. You can use either one as the
application to be deployed.

1. To build an application using Java and Apache Maven, refer to the [instructions here](./analytics/README.md)
2. To build an application using Kotlin and Gradle, refer to the [instructions here](./analytics-kotlin/README.md)

### 2. Deploy infrastructure

Infrastructure deployment automatically picks up the packaged application JAR and uploads it to an
[Amazon S3](https://aws.amazon.com/s3/) bucket. The infrastructure consists of multiple stacks built as an AWS CDK
project written in Python 3. For more information on working with the CDK and Python,
[check the following guide](https://docs.aws.amazon.com/cdk/latest/guide/work-with-cdk-python.html). To deploy all
stacks, use the `--all` option when invoking `cdk deploy`.

1. Navigate to the `cdk` folder
1. Follow the [instructions here](cdk/README.md) to create a virtual environment and build the stacks
1. Make sure the CDK environment is bootstrapped in the account and region you're deploying the stacks to, as the
   stacks use assets to deploy the Kinesis Data Analytics Flink application
   ```shell
   $ cdk bootstrap
   ```
1. Deploy the infrastructure using the packaged applications
   1. To deploy the infrastructure with the `Java` application as the basis for the Kinesis Data Analytics
      application, you can deploy the CDK stacks directly
      ```shell
      $ cdk deploy --all
      ```
   1. To deploy the infrastructure with the `Kotlin` application as the basis for the Kinesis Data Analytics
      application, you can customize the stacks using [context variables](https://docs.aws.amazon.com/cdk/latest/guide/context.html)
      ```shell
      $ cdk deploy --context kda_path=../analytics-kotlin/build/libs/analytics-timestream-kotlin-sample-all.jar --all
      ```

#### Check deployed resources and kick off the pipeline

Once the CDK stacks are deployed successfully, you can check the created AWS resources. You can either run the
script [`./setup.sh`](setup.sh) directly or follow the instructions below.

1. Amazon Kinesis Data Stream

   Deployed through stack `amazon-kinesis-stream-stack`; it is ready to receive events from the sample producer.

   ![Amazon EventBridge 1 min rate event triggers AWS Lambda to send JSON data to Amazon Kinesis Data Stream](assets/DeviceSimulatorSampleProducer.svg)

   Producer resources are deployed through stack `sample-kinesis-stream-producer`. You can check the Lambda function's
   monitoring and logs to make sure it is invoked regularly and is sending events to the stream.

   For more information on how the producer works,
   [check the documentation](cdk/stacks/sample_kinesis_stream_producer/README.md).

1. Amazon Kinesis Data Analytics for Apache Flink Application

   Deployed through stack `amazon-kinesis-analytics`. Although the application is created by the stack, it is not yet
   running.

   To run the application and kick off the pipeline, pick up the application name from the stack
   output `KdaApplicationName`.

   Follow the instructions
   under the [Run the Application section](https://docs.aws.amazon.com/kinesisanalytics/latest/java/get-started-exercise.html#get-started-exercise-7)
   or run the following command
   ```shell
   $ aws kinesisanalyticsv2 start-application --application-name amazon-kinesis-analytics \
     --run-configuration '{ "ApplicationRestoreConfiguration": { "ApplicationRestoreType": "SKIP_RESTORE_FROM_SNAPSHOT" } }'
   ```
1. Amazon S3 bucket storing the application JAR package, plus the application role

   Deployed through stack `flink-source-bucket`
1. Amazon Timestream Database

   Deployed through stack `amazon-timestream`. You can verify that measurements are arriving with a quick query, as
   sketched right after this list.
1. Grafana dashboard deployment

   Deployed through stack `grafana`. To find the created Grafana instance, check the output `MyFargateServiceServiceURL...`.
   1. Grafana is deployed using Amazon ECS on AWS Fargate for compute
   1. Amazon Elastic File System (Amazon EFS) is used for storage
   1. An AWS Secrets Manager secret stores the Grafana `admin` user password. Check the stack
      output `GrafanaAdminSecret` for the ID of the secret storing the Grafana admin user's password.
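Before moving on to visualization, you may want to confirm that measurements are actually landing in Timestream. The
standalone sketch below is not part of this repository: it uses the AWS SDK for Java Timestream Query client (the
`aws-java-sdk-timestreamquery` artifact) and assumes the default database and table names used by the sample,
`TimestreamDB` and `SampleMetricsTable`.

```java
import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery;
import com.amazonaws.services.timestreamquery.AmazonTimestreamQueryClientBuilder;
import com.amazonaws.services.timestreamquery.model.QueryRequest;
import com.amazonaws.services.timestreamquery.model.QueryResult;

public class VerifyIngestionSketch {
    public static void main(String[] args) {
        AmazonTimestreamQuery client = AmazonTimestreamQueryClientBuilder.defaultClient();

        // Count the rows written per measure over the last 15 minutes.
        QueryResult result = client.query(new QueryRequest().withQueryString(
                "SELECT measure_name, COUNT(*) AS cnt"
                        + " FROM \"TimestreamDB\".\"SampleMetricsTable\""
                        + " WHERE time > ago(15m) GROUP BY measure_name"));

        result.getRows().forEach(row -> System.out.println(row.getData()));
    }
}
```

If the pipeline is running, the per-measure counts should keep growing while the producer Lambda continues to publish
events.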
### 3. Visualizing Amazon Timestream values through Grafana

To help you get started with data visualization, we have created a sample Grafana dashboard that visualizes the data
sent to Timestream by the sample producer. If you invoked the [`setup.sh`](setup.sh) script, it has already performed
these steps automatically.

You can also check the
following [video tutorial](https://docs.aws.amazon.com/timestream/latest/developerguide/Grafana.html#Grafana.video-tutorial)
or the [complete guide](https://docs.aws.amazon.com/timestream/latest/developerguide/Grafana.html) for more information.

1. Install the Grafana datasource plugin

   The [Grafana Amazon Timestream datasource plugin](https://grafana.com/grafana/plugins/grafana-timestream-datasource/installation)
   is automatically installed through the infrastructure stack.
1. Create the Grafana datasource

   To create a Timestream data source, go to Data Sources, click Add data source, search for Timestream, and select
   the Timestream datasource. You can also
   use the [programmatic means described here](https://grafana.com/docs/grafana/latest/http_api/data_source/).

   To create the data source programmatically:
   1. Create an API token. Make sure to replace `<grafana-admin-password>` with the secret value, escaping any
      characters in the password such as `$` or `"` with a preceding `\`, and replace `<grafana-url>` with the value
      from the stack output.
   ```shell
   $ grafana_token=$(\
   curl -X POST -u "admin:<grafana-admin-password>" \
   -H "Content-Type: application/json" \
   -d '{"name":"apikeycurl", "role": "Admin"}' \
   <grafana-url>/api/auth/keys \
   | jq -r .key)
   ```
   1. Create the Amazon Timestream datasource
   ```shell
   $ curl -X POST --insecure \
   -H "Content-Type: application/json" -H "Authorization: Bearer ${grafana_token}" \
   -d @./cdk/stacks/grafana/datasource.json \
   <grafana-url>/api/datasources
   ```
1. Create the Grafana dashboard

   Grafana dashboards can be exported and imported as `JSON`. You can find a
   [sample dashboard JSON here](cdk/stacks/grafana/dashboard.json). The defined dashboard provides sample variables
   and visualizations that query the Timestream database directly. The dashboard assumes the Timestream datasource is
   your default datasource.

   To import the sample dashboard you
   can [follow the instructions here](https://grafana.com/docs/grafana/latest/dashboards/export-import/#importing-a-dashboard)
   or use the [programmatic means described here](https://grafana.com/docs/grafana/latest/http_api/dashboard/).

   To create the dashboard programmatically:
   ```shell
   $ curl -X POST --insecure \
   -H "Content-Type: application/json" -H "Authorization: Bearer ${grafana_token}" \
   -d @./cdk/stacks/grafana/dashboard.json \
   <grafana-url>/api/dashboards/db
   ```
1. To check the created dashboard, navigate to `<grafana-url>`
   1. Log in using the admin credentials.
   1. Check the created dashboard under "Dashboards" -> "Manage"
   ![Grafana dashboards screenshot](assets/GrafanaDashboard.png)

## Clean up

To delete all created stack resources you can use

```shell
$ cdk destroy --all
```

## Security

See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.

## License

This library is licensed under the MIT-0 License. See the LICENSE file.

--------------------------------------------------------------------------------
/cdk/stacks/grafana/dashboard.json:
--------------------------------------------------------------------------------
1 | { 2 | "_comment": "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0", 3 | "dashboard": { 4 | "editable": true, 5 | "panels": [ 6 | { 7 | "datasource": null, 8 | "description": "", 9 | "fieldConfig": { 10 | "defaults": { 11 | "color": { 12 | "mode": "palette-classic" 13 | }, 14 | "custom": {}, 15 | "mappings": [], 16 | "thresholds": { 17 | "mode": "absolute", 18 | "steps": [ 19 | { 20 | "color": "green", 21 | "value": null 22 | }, 23 | { 24 | "color": "red", 25 | "value": 80 26 | } 27 | ] 28 | } 29 | }, 30 | "overrides": [] 31 | }, 32 | "gridPos": { 33 | "h": 8, 34 | "w": 12, 35 | "x": 0, 36 | "y": 0 37 | }, 38 | "id": 8, 39 | "options": { 40 | "reduceOptions": { 41 | "calcs": [ 42 | "mean" 43 | ], 44 | "fields": "", 45 | "values": false 46 | }, 47 | "showThresholdLabels": false, 48 | "showThresholdMarkers": true 49 | }, 50 | "pluginVersion": "7.3.6", 51 | "targets": [ 52 | { 53 | "measure": "voltage_L1", 54 | "queryType": "raw", 55 | "rawQuery": "SELECT DeviceID, BIN(time, $__interval_ms) AS time_bin, ROUND(AVG(measure_value::double), 2) AS avg_value, ROUND(APPROX_PERCENTILE(measure_value::double, 0.9), 2) AS p90_value, ROUND(APPROX_PERCENTILE(measure_value::double, 0.95), 2) AS p95_value, ROUND(APPROX_PERCENTILE(measure_value::double, 0.99), 2) AS p99_value FROM $__database.$__table WHERE $__timeFilter AND measure_name = '${measure}' AND DeviceID = '${device_id}' GROUP BY DeviceID, BIN(time, $__interval_ms) ORDER BY p99_value DESC", 56 | "refId": "A" 57 | } 58 | ], 59 | "timeFrom": null, 60 | "timeShift": null, 61 | "title": "Single device measure", 62 | "type": "gauge" 63 | }, 64 | { 65 | "datasource": null, 66 | "fieldConfig": { 67 | "defaults": { 68 | "custom": { 69 | "align": null, 70 | "filterable": false 71 | }, 72 | "mappings": [], 73 | "thresholds": { 74 | "mode": "absolute", 75 | "steps": [ 76 | { 77 | "color": "green", 78 | "value": null 79 | }, 80 | { 81 | "color": "red", 82 | "value": 80 83 | } 84 | ] 85 | } 86 | }, 87 | "overrides": [] 88 | }, 89 | "gridPos": { 90 | "h": 4, 91 | "w": 12, 92 | "x": 12, 93 | "y": 0 94 | }, 95 | "id": 4, 96 | "options": { 97 | "colorMode": "value", 98 | "graphMode": "area", 99 | "justifyMode": "auto", 100 | "orientation": "auto", 101 | "reduceOptions": { 102 | "calcs": [ 103 | "mean" 104 | ], 105 | "fields": "", 106 | "values": false 107 | }, 108 | "textMode": "auto" 109 | }, 110 | "pluginVersion": "7.3.6", 111 | "targets": [ 112 | { 113 | "hide": false, 114 | "queryType": "raw", 115 | "rawQuery": "SELECT CASE WHEN measure_name = 'voltage_measure' THEN count_value ELSE NULL END AS voltage,\n CASE WHEN measure_name = 'watt_measure' THEN count_value ELSE NULL END AS watt,\n CASE WHEN measure_name = 'temperature_measure' THEN count_value ELSE NULL END AS temperature,\n CASE WHEN measure_name = 'humidity_measure' THEN count_value ELSE NULL END AS humidity\nFROM (\n SELECT measure_name, Count(*) AS count_value\n FROM $__database.$__table\n Group BY measure_name\n)", 116 | "refId": "B" 117 | } 118 | ], 119 | "timeFrom": null, 120 | "timeShift": null, 121 | "title": "Count measurements reads", 122 | "type": "stat" 123 | }, 124 | { 125 | "datasource": null, 126 | "description": "", 127 | "fieldConfig": { 128 | "defaults": { 129 | "color": { 130 | "mode": "palette-classic" 131 | }, 132 | "custom": { 133 | "align": null, 134 | "filterable": false 135 | }, 136 | "mappings": [], 137 | "thresholds": { 138 | "mode": "percentage", 139 | "steps": [ 140 | { 141 | "color": "green", 142 | "value": null 143 | }, 144 | { 145 | "color": "red", 146 | "value": 80 147 | } 148 | ] 149 | }, 150 | "unit": 
"none" 151 | }, 152 | "overrides": [] 153 | }, 154 | "gridPos": { 155 | "h": 3, 156 | "w": 12, 157 | "x": 12, 158 | "y": 4 159 | }, 160 | "id": 2, 161 | "options": { 162 | "displayMode": "lcd", 163 | "orientation": "horizontal", 164 | "reduceOptions": { 165 | "calcs": [ 166 | "last" 167 | ], 168 | "fields": "/.*/", 169 | "values": false 170 | }, 171 | "showUnfilled": true 172 | }, 173 | "pluginVersion": "7.3.6", 174 | "targets": [ 175 | { 176 | "queryType": "raw", 177 | "rawQuery": "SELECT measure_value::double as ${measure} FROM $__database.$__table where measure_name = '${measure}' AND $__timeFilter", 178 | "refId": "A" 179 | } 180 | ], 181 | "timeFrom": null, 182 | "timeShift": null, 183 | "title": "Measure", 184 | "type": "bargauge" 185 | }, 186 | { 187 | "cards": { 188 | "cardPadding": null, 189 | "cardRound": null 190 | }, 191 | "color": { 192 | "cardColor": "#b4ff00", 193 | "colorScale": "sqrt", 194 | "colorScheme": "interpolateBlues", 195 | "exponent": 0.5, 196 | "mode": "spectrum" 197 | }, 198 | "dataFormat": "timeseries", 199 | "datasource": null, 200 | "fieldConfig": { 201 | "defaults": { 202 | "custom": {} 203 | }, 204 | "overrides": [] 205 | }, 206 | "gridPos": { 207 | "h": 8, 208 | "w": 12, 209 | "x": 12, 210 | "y": 7 211 | }, 212 | "heatmap": {}, 213 | "hideZeroBuckets": false, 214 | "highlightCards": true, 215 | "id": 10, 216 | "legend": { 217 | "show": false 218 | }, 219 | "pluginVersion": "7.3.2", 220 | "reverseYBuckets": false, 221 | "targets": [ 222 | { 223 | "queryType": "raw", 224 | "rawQuery": "select * from $__database.$__table\nWHERE $__timeFilter\nAND measure_name = '${measure}'", 225 | "refId": "A" 226 | } 227 | ], 228 | "timeFrom": null, 229 | "timeShift": null, 230 | "title": "Measure value spectrum for all devices", 231 | "tooltip": { 232 | "show": true, 233 | "showHistogram": false 234 | }, 235 | "type": "heatmap", 236 | "xAxis": { 237 | "show": true 238 | }, 239 | "xBucketNumber": null, 240 | "xBucketSize": null, 241 | "yAxis": { 242 | "decimals": null, 243 | "format": "none", 244 | "logBase": 1, 245 | "max": null, 246 | "min": null, 247 | "show": true, 248 | "splitFactor": null 249 | }, 250 | "yBucketBound": "auto", 251 | "yBucketNumber": null, 252 | "yBucketSize": null 253 | }, 254 | { 255 | "aliasColors": {}, 256 | "bars": false, 257 | "dashLength": 10, 258 | "dashes": false, 259 | "datasource": null, 260 | "fieldConfig": { 261 | "defaults": { 262 | "custom": {}, 263 | "mappings": [], 264 | "thresholds": { 265 | "mode": "absolute", 266 | "steps": [ 267 | { 268 | "color": "green", 269 | "value": null 270 | }, 271 | { 272 | "color": "red", 273 | "value": 80 274 | } 275 | ] 276 | }, 277 | "unit": "none" 278 | }, 279 | "overrides": [] 280 | }, 281 | "fill": 1, 282 | "fillGradient": 0, 283 | "gridPos": { 284 | "h": 8, 285 | "w": 12, 286 | "x": 0, 287 | "y": 8 288 | }, 289 | "hiddenSeries": false, 290 | "id": 12, 291 | "legend": { 292 | "alignAsTable": true, 293 | "avg": true, 294 | "current": true, 295 | "max": true, 296 | "min": true, 297 | "rightSide": true, 298 | "show": true, 299 | "total": false, 300 | "values": true 301 | }, 302 | "lines": true, 303 | "linewidth": 1, 304 | "nullPointMode": "null as zero", 305 | "options": { 306 | "alertThreshold": true 307 | }, 308 | "percentage": false, 309 | "pluginVersion": "7.3.6", 310 | "pointradius": 2, 311 | "points": true, 312 | "renderer": "flot", 313 | "repeat": null, 314 | "repeatDirection": "v", 315 | "seriesOverrides": [], 316 | "spaceLength": 10, 317 | "stack": false, 318 | "steppedLine": false, 319 | 
"targets": [ 320 | { 321 | "queryType": "raw", 322 | "rawQuery": "SELECT time, measure_value::double as ${measure} FROM $__database.$__table WHERE measure_name = '${measure}' AND DeviceID = '${device_id}' AND $__timeFilter order by time DESC", 323 | "refId": "A" 324 | } 325 | ], 326 | "thresholds": [ 327 | { 328 | "colorMode": "critical", 329 | "fill": true, 330 | "line": true, 331 | "op": "gt", 332 | "value": 400, 333 | "yaxis": "left" 334 | }, 335 | { 336 | "colorMode": "critical", 337 | "fill": true, 338 | "line": true, 339 | "op": "lt", 340 | "value": 200, 341 | "yaxis": "left" 342 | } 343 | ], 344 | "timeFrom": null, 345 | "timeRegions": [], 346 | "timeShift": null, 347 | "title": "Measure of a single device", 348 | "tooltip": { 349 | "shared": true, 350 | "sort": 0, 351 | "value_type": "individual" 352 | }, 353 | "type": "graph", 354 | "xaxis": { 355 | "buckets": null, 356 | "mode": "time", 357 | "name": null, 358 | "show": true, 359 | "values": [] 360 | }, 361 | "yaxes": [ 362 | { 363 | "format": "none", 364 | "label": null, 365 | "logBase": 1, 366 | "max": null, 367 | "min": null, 368 | "show": true 369 | }, 370 | { 371 | "format": "short", 372 | "label": null, 373 | "logBase": 1, 374 | "max": null, 375 | "min": null, 376 | "show": true 377 | } 378 | ], 379 | "yaxis": { 380 | "align": false, 381 | "alignLevel": null 382 | } 383 | }, 384 | { 385 | "aliasColors": {}, 386 | "bars": false, 387 | "dashLength": 10, 388 | "dashes": false, 389 | "datasource": null, 390 | "fieldConfig": { 391 | "defaults": { 392 | "custom": { 393 | "align": null, 394 | "filterable": false 395 | }, 396 | "mappings": [], 397 | "thresholds": { 398 | "mode": "absolute", 399 | "steps": [ 400 | { 401 | "color": "green", 402 | "value": null 403 | }, 404 | { 405 | "color": "red", 406 | "value": 80 407 | } 408 | ] 409 | }, 410 | "unit": "none" 411 | }, 412 | "overrides": [] 413 | }, 414 | "fill": 1, 415 | "fillGradient": 3, 416 | "gridPos": { 417 | "h": 10, 418 | "w": 12, 419 | "x": 0, 420 | "y": 16 421 | }, 422 | "hiddenSeries": false, 423 | "id": 14, 424 | "legend": { 425 | "alignAsTable": false, 426 | "avg": false, 427 | "current": false, 428 | "max": false, 429 | "min": false, 430 | "rightSide": true, 431 | "show": true, 432 | "total": false, 433 | "values": false 434 | }, 435 | "lines": true, 436 | "linewidth": 1, 437 | "nullPointMode": "null", 438 | "options": { 439 | "alertThreshold": false 440 | }, 441 | "percentage": false, 442 | "pluginVersion": "7.3.6", 443 | "pointradius": 2, 444 | "points": false, 445 | "renderer": "flot", 446 | "seriesOverrides": [ 447 | {} 448 | ], 449 | "spaceLength": 10, 450 | "stack": false, 451 | "steppedLine": false, 452 | "targets": [ 453 | { 454 | "queryType": "raw", 455 | "rawQuery": "SELECT DeviceID, CREATE_TIME_SERIES(time, measure_value::double) FROM $__database.$__table WHERE $__timeFilter AND measure_name = '${measure}' group by DeviceID", 456 | "refId": "B" 457 | } 458 | ], 459 | "thresholds": [], 460 | "timeFrom": null, 461 | "timeRegions": [], 462 | "timeShift": null, 463 | "title": "Measure all devices", 464 | "tooltip": { 465 | "shared": true, 466 | "sort": 0, 467 | "value_type": "individual" 468 | }, 469 | "transformations": [], 470 | "type": "graph", 471 | "xaxis": { 472 | "buckets": null, 473 | "mode": "time", 474 | "name": null, 475 | "show": true, 476 | "values": [] 477 | }, 478 | "yaxes": [ 479 | { 480 | "format": "none", 481 | "label": null, 482 | "logBase": 1, 483 | "max": null, 484 | "min": null, 485 | "show": true 486 | }, 487 | { 488 | "format": "short", 
489 | "label": null, 490 | "logBase": 1, 491 | "max": null, 492 | "min": null, 493 | "show": true 494 | } 495 | ], 496 | "yaxis": { 497 | "align": false, 498 | "alignLevel": null 499 | } 500 | } 501 | ], 502 | "refresh": "5m", 503 | "schemaVersion": 26, 504 | "style": "dark", 505 | "tags": [ 506 | "Amazon Timeseries", 507 | "sample" 508 | ], 509 | "templating": { 510 | "list": [ 511 | { 512 | "allValue": null, 513 | "datasource": null, 514 | "definition": "SELECT distinct DeviceID FROM \"TimestreamDB\".\"SampleMetricsTable\"", 515 | "error": null, 516 | "hide": 0, 517 | "includeAll": false, 518 | "label": "Device", 519 | "multi": false, 520 | "name": "device_id", 521 | "options": [], 522 | "query": "SELECT distinct DeviceID FROM \"TimestreamDB\".\"SampleMetricsTable\"", 523 | "refresh": 1, 524 | "regex": "", 525 | "skipUrlSync": false, 526 | "sort": 0, 527 | "tagValuesQuery": "", 528 | "tags": [], 529 | "tagsQuery": "", 530 | "type": "query", 531 | "useTags": false 532 | }, 533 | { 534 | "allValue": null, 535 | "datasource": null, 536 | "definition": "SELECT distinct measure_name FROM \"TimestreamDB\".\"SampleMetricsTable\"", 537 | "error": null, 538 | "hide": 0, 539 | "includeAll": false, 540 | "label": "Measure", 541 | "multi": false, 542 | "name": "measure", 543 | "options": [], 544 | "query": "SELECT distinct measure_name FROM \"TimestreamDB\".\"SampleMetricsTable\"", 545 | "refresh": 1, 546 | "regex": "", 547 | "skipUrlSync": false, 548 | "sort": 1, 549 | "tagValuesQuery": "", 550 | "tags": [], 551 | "tagsQuery": "", 552 | "type": "query", 553 | "useTags": false 554 | } 555 | ] 556 | }, 557 | "time": { 558 | "from": "now-5m", 559 | "to": "now" 560 | }, 561 | "timezone": "browser", 562 | "id": null, 563 | "title": "Sample Amazon Timestream Dashboard", 564 | "version": 0 565 | }, 566 | "overwrite": true 567 | } -------------------------------------------------------------------------------- /assets/DeviceSimulatorSampleProducer.svg: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 24 | 25 | IoT Device Simulator (sample producer) 26 | 27 | 28 | 30 | 32 | 34 | 35 | 36 | 38 |
[SVG diagram "IoT Device Simulator (sample producer)": an Amazon EventBridge rule (every 1 min event) triggers an AWS Lambda function, which sends JSON records of the form { DeviceId: uuid, Measure1: value, ..., MeasureN: value, Time: timestamp } to an Amazon Kinesis Data Stream. Vector markup not reproduced in this listing.]
--------------------------------------------------------------------------------
/assets/OverallArchitecture.svg:
--------------------------------------------------------------------------------
[SVG diagram: Device -> Amazon Kinesis Data Stream -> Amazon Kinesis Data Analytics for Apache Flink -> Amazon Timestream -> Grafana. Vector markup not reproduced in this listing.]
--------------------------------------------------------------------------------